Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2018/05/14 17:44:04 UTC

[01/50] [abbrv] hadoop git commit: HADOOP-15356. Make HTTP timeout configurable in ADLS connector. Contributed by Atul Sikaria and Sean Mackrory.

Repository: hadoop
Updated Branches:
  refs/heads/HDDS-4 abdb24754 -> c9c79f775


HADOOP-15356. Make HTTP timeout configurable in ADLS connector. Contributed by Atul Sikaria and Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d983701b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d983701b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d983701b

Branch: refs/heads/HDDS-4
Commit: d983701bbec87da47edbf7df3187dee8be1859f9
Parents: abdb247
Author: Sean Mackrory <ma...@apache.org>
Authored: Fri Apr 27 17:17:02 2018 -0600
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:08 2018 -0700

----------------------------------------------------------------------
 .../src/main/resources/core-default.xml         |  9 +++
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |  1 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java | 13 ++++
 .../src/site/markdown/troubleshooting_adl.md    |  9 +++
 .../fs/adl/live/AdlStorageConfiguration.java    | 12 ++-
 .../fs/adl/live/TestAdlSdkConfiguration.java    | 77 ++++++++++++++++++++
 6 files changed, 117 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d983701b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index dc13cb9..98b0ddf 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2929,6 +2929,15 @@
     </description>
   </property>
 
+  <property>
+    <name>adl.http.timeout</name>
+    <value>-1</value>
+    <description>
+      Base timeout (in seconds) for HTTP requests from the ADL SDK. Values of
+      zero or less cause the SDK default to be used instead.
+    </description>
+  </property>
+
   <!-- Azure Data Lake File System Configurations Ends Here-->
 
   <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d983701b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index e3a4ad6..e124e11 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -105,6 +105,7 @@ public final class AdlConfKeys {
   static final String ADL_ENABLEUPN_FOR_OWNERGROUP_KEY =
       "adl.feature.ownerandgroup.enableupn";
   static final boolean ADL_ENABLEUPN_FOR_OWNERGROUP_DEFAULT = false;
+  public static final String ADL_HTTP_TIMEOUT = "adl.http.timeout";
 
   public static void addDeprecatedKeys() {
     Configuration.addDeprecations(new DeprecationDelta[]{

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d983701b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index aa6babe..3e149a6 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -195,6 +195,14 @@ public class AdlFileSystem extends FileSystem {
         VersionInfo.getVersion().trim() + "/" + clusterName + "/"
         + clusterType);
 
+    int timeout = conf.getInt(ADL_HTTP_TIMEOUT, -1);
+    if (timeout > 0) {
+      // only set timeout if specified in config. Otherwise use SDK default
+      options.setDefaultTimeout(timeout);
+    } else {
+      LOG.info("No valid ADL SDK timeout configured: using SDK default.");
+    }
+
     adlClient.setOptions(options);
 
     boolean trackLatency = conf
@@ -320,6 +328,11 @@ public class AdlFileSystem extends FileSystem {
     return azureTokenProvider;
   }
 
+  @VisibleForTesting
+  public ADLStoreClient getAdlClient() {
+    return adlClient;
+  }
+
   /**
    * Constructing home directory locally is fine as long as Hadoop
    * local user name and ADL user name relationship story is not fully baked

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d983701b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
index 80b2a6f..b362a9c 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
@@ -144,3 +144,12 @@ org.apache.hadoop.security.AccessControlException: MKDIRS failed with error 0x83
 ```
 
 See "Adding the service principal to your ADL Account".
+
+## Timeouts
+
+The timeout used by the ADL SDK can be overridden with the Hadoop property
+`adl.http.timeout`.  Timeouts surfacing in compute frameworks can sometimes
+be addressed by lowering the timeout used by the SDK: a lower timeout at the
+storage layer allows more retries before the framework's timeout is reached,
+which can actually increase the likelihood of success, because attempts that
+would ultimately fail will fail faster.
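For illustration, the property can also be set programmatically through the
standard Hadoop Configuration API; a minimal sketch (the 60-second value is
only an example, not a recommendation):

    import org.apache.hadoop.conf.Configuration;

    public class AdlTimeoutExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Values of zero or less leave the ADL SDK default in effect.
        conf.setInt("adl.http.timeout", 60); // base timeout in seconds (example)
        System.out.println("adl.http.timeout = "
            + conf.getInt("adl.http.timeout", -1));
      }
    }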

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d983701b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/AdlStorageConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/AdlStorageConfiguration.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/AdlStorageConfiguration.java
index a9b4ab8..1da40ad 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/AdlStorageConfiguration.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/AdlStorageConfiguration.java
@@ -79,20 +79,24 @@ public final class AdlStorageConfiguration {
     if (conf == null) {
       conf = getConfiguration();
     }
+    return createStorageConnector(conf);
+  }
 
+  public synchronized static FileSystem createStorageConnector(
+      Configuration fsConfig) throws URISyntaxException, IOException {
     if (!isContractTestEnabled()) {
       return null;
     }
 
-    String fileSystem = conf.get(FILE_SYSTEM_KEY);
+    String fileSystem = fsConfig.get(FILE_SYSTEM_KEY);
     if (fileSystem == null || fileSystem.trim().length() == 0) {
       throw new IOException("Default file system not configured.");
     }
 
-    Class<?> clazz = conf.getClass(FILE_SYSTEM_IMPL_KEY,
+    Class<?> clazz = fsConfig.getClass(FILE_SYSTEM_IMPL_KEY,
         FILE_SYSTEM_IMPL_DEFAULT);
-    FileSystem fs = (FileSystem) ReflectionUtils.newInstance(clazz, conf);
-    fs.initialize(new URI(fileSystem), conf);
+    FileSystem fs = (FileSystem) ReflectionUtils.newInstance(clazz, fsConfig);
+    fs.initialize(new URI(fileSystem), fsConfig);
     return fs;
   }
 }
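A sketch of how a live test might use the new overload to pass per-test
settings (assumes the same package and the live-test configuration that
AdlStorageConfiguration reads; the timeout value is illustrative):

    package org.apache.hadoop.fs.adl.live;

    import java.io.IOException;
    import java.net.URISyntaxException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class ConnectorUsageSketch {
      public static void main(String[] args)
          throws IOException, URISyntaxException {
        Configuration conf = AdlStorageConfiguration.getConfiguration();
        conf.setInt("adl.http.timeout", 30); // per-run override (example value)
        FileSystem fs = AdlStorageConfiguration.createStorageConnector(conf);
        if (fs == null) {
          System.out.println("Contract tests are disabled; nothing to do.");
        }
      }
    }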

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d983701b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlSdkConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlSdkConfiguration.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlSdkConfiguration.java
new file mode 100644
index 0000000..ca762d9
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlSdkConfiguration.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl.live;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.adl.AdlFileSystem;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import static org.apache.hadoop.fs.adl.AdlConfKeys.ADL_HTTP_TIMEOUT;
+
+/**
+ * Tests interactions with the SDK and ensures the configuration has the
+ * desired effect.
+ */
+public class TestAdlSdkConfiguration {
+  @Test
+  public void testDefaultTimeout() throws IOException {
+    AdlFileSystem fs = null;
+    Configuration conf = null;
+    int effectiveTimeout;
+
+    conf = AdlStorageConfiguration.getConfiguration();
+    conf.setInt(ADL_HTTP_TIMEOUT, -1);
+    try {
+      fs = (AdlFileSystem)
+          (AdlStorageConfiguration.createStorageConnector(conf));
+    } catch (URISyntaxException e) {
+      throw new IllegalStateException("Can not initialize ADL FileSystem. "
+          + "Please check test.fs.adl.name property.", e);
+    }
+
+    // Skip this test if we can't get a real FS
+    Assume.assumeNotNull(fs);
+
+    effectiveTimeout = fs.getAdlClient().getDefaultTimeout();
+    Assert.assertFalse("A negative timeout is not supposed to take effect",
+        effectiveTimeout < 0);
+
+    conf = AdlStorageConfiguration.getConfiguration();
+    conf.setInt(ADL_HTTP_TIMEOUT, 17);
+    try {
+      fs = (AdlFileSystem)
+          (AdlStorageConfiguration.createStorageConnector(conf));
+    } catch (URISyntaxException e) {
+      throw new IllegalStateException("Can not initialize ADL FileSystem. "
+          + "Please check test.fs.adl.name property.", e);
+    }
+
+    effectiveTimeout = fs.getAdlClient().getDefaultTimeout();
+    Assert.assertEquals("Configured timeout should take effect",
+        17, effectiveTimeout);
+
+    // The default value may vary by SDK, so that value is not tested here.
+  }
+}
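The two try/catch blocks above are identical; as a possible cleanup (not part
of this patch), a small helper inside TestAdlSdkConfiguration could remove the
duplication:

    private static AdlFileSystem connect(Configuration conf) throws IOException {
      try {
        return (AdlFileSystem)
            AdlStorageConfiguration.createStorageConnector(conf);
      } catch (URISyntaxException e) {
        throw new IllegalStateException("Can not initialize ADL FileSystem. "
            + "Please check test.fs.adl.name property.", e);
      }
    }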


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[36/50] [abbrv] hadoop git commit: YARN-8288. Fix wrong number of table columns in Resource Model doc. Contributed by Weiwei Yang.

Posted by xy...@apache.org.
YARN-8288. Fix wrong number of table columns in Resource Model doc. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2af3970d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2af3970d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2af3970d

Branch: refs/heads/HDDS-4
Commit: 2af3970dd74a35fb41b4042a18574d1ea8f28d60
Parents: 82e41a3
Author: Naganarasimha <na...@apache.org>
Authored: Tue May 15 00:03:38 2018 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../hadoop-yarn-site/src/site/markdown/ResourceModel.md | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2af3970d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
index 75e5c92..f968b5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
@@ -45,20 +45,20 @@ The following configuration properties are supported. See below for details.
 
 `resource-types.xml`
 
-| Configuration Property | Value | Description |
-|:---- |:---- |:---- |
+| Configuration Property | Description |
+|:---- |:---- |
 | `yarn.resource-types` | Comma-separated list of additional resources. May not include `memory`, `memory-mb`, or `vcores` |
 | `yarn.resource-types.<resource>.units` | Default unit for the specified resource type |
 | `yarn.resource-types.<resource>.minimum` | The minimum request for the specified resource type |
 | `yarn.resource-types.<resource>.maximum` | The maximum request for the specified resource type |
 
-`nodeĀ­-resources.xml`
+`node-resources.xml`
 
-| Configuration Property | Value | Description |
-|:---- |:---- |:---- |
+| Configuration Property | Description |
+|:---- |:---- |
 | `yarn.nodemanager.resource-type.<resource>` | The count of the specified resource available from the node manager |
 
-Please note that the `resource-types.xml` and `nodeĀ­-resources.xml` files
+Please note that the `resource-types.xml` and `node-resources.xml` files
 also need to be placed in the same configuration directory as `yarn-site.xml` if
 they are used. Alternatively, the properties may be placed into the
 `yarn-site.xml` file instead.
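As an illustration of the properties above, the same settings can be made
through the Configuration API; the `gpu` resource name and all values here are
examples only:

    import org.apache.hadoop.conf.Configuration;

    public class ResourceTypesSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Declare one additional resource type besides memory-mb and vcores.
        conf.set("yarn.resource-types", "gpu");
        conf.set("yarn.resource-types.gpu.minimum", "0");
        conf.set("yarn.resource-types.gpu.maximum", "8");
        // On a node manager: how many units of the resource this node offers.
        conf.set("yarn.nodemanager.resource-type.gpu", "4");
        System.out.println(conf.get("yarn.resource-types"));
      }
    }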




[30/50] [abbrv] hadoop git commit: YARN-8268. Fair scheduler: reservable queue is configured both as parent and leaf queue. (Gergo Repas via Haibo Chen)

Posted by xy...@apache.org.
YARN-8268. Fair scheduler: reservable queue is configured both as parent and leaf queue. (Gergo Repas via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a95bd94b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a95bd94b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a95bd94b

Branch: refs/heads/HDDS-4
Commit: a95bd94b2d9a53c1646730adf6f9535bd9c4f7de
Parents: a67f2c5
Author: Haibo Chen <ha...@apache.org>
Authored: Fri May 11 11:28:05 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../fair/allocation/AllocationFileQueueParser.java        |  5 ++++-
 .../scheduler/fair/TestAllocationFileLoaderService.java   | 10 ++++++++++
 2 files changed, 14 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a95bd94b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
index ec7e4a4..d5a436e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
@@ -217,7 +217,10 @@ public class AllocationFileQueueParser {
     // if a leaf in the alloc file is marked as type='parent'
     // then store it as a parent queue
     if (isLeaf && !"parent".equals(element.getAttribute("type"))) {
-      builder.configuredQueues(FSQueueType.LEAF, queueName);
+      // a reservable queue has already been configured as a parent
+      if (!isReservable) {
+        builder.configuredQueues(FSQueueType.LEAF, queueName);
+      }
     } else {
       if (isReservable) {
         throw new AllocationConfigurationException("The configuration settings"
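For context, a sketch of the allocation-file shape this guards against, written
as the Java string a test could feed to the loader (element names assumed per
the fair scheduler allocation format, where <reservation/> marks a queue as
reservable):

    public class ReservableQueueSketch {
      public static void main(String[] args) {
        String alloc =
            "<?xml version=\"1.0\"?>\n"
            + "<allocations>\n"
            + "  <queue name=\"reservable\">\n"
            + "    <reservation/>\n"
            + "  </queue>\n"
            + "</allocations>\n";
        // After this change, a queue with <reservation/> is stored only under
        // FSQueueType.PARENT, never under FSQueueType.LEAF.
        System.out.println(alloc);
      }
    }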

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a95bd94b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
index 5522333..8591d67 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
@@ -42,6 +42,9 @@ import java.net.URISyntaxException;
 import java.net.URL;
 import java.nio.charset.StandardCharsets;
 import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
@@ -801,6 +804,13 @@ public class TestAllocationFileLoaderService {
     String nonreservableQueueName = "root.other";
     assertFalse(allocConf.isReservable(nonreservableQueueName));
     assertTrue(allocConf.isReservable(reservableQueueName));
+    Map<FSQueueType, Set<String>> configuredQueues =
+        allocConf.getConfiguredQueues();
+    assertTrue("reservable queue is expected to be a parent queue",
+        configuredQueues.get(FSQueueType.PARENT).contains(reservableQueueName));
+    assertFalse("reservable queue should not be a leaf queue",
+        configuredQueues.get(FSQueueType.LEAF)
+          .contains(reservableQueueName));
 
     assertTrue(allocConf.getMoveOnExpiry(reservableQueueName));
     assertEquals(ReservationSchedulerConfiguration.DEFAULT_RESERVATION_WINDOW,




[08/50] [abbrv] hadoop git commit: HDDS-18. Ozone Shell should use RestClient and RpcClient. Contributed by Lokesh Jain.

Posted by xy...@apache.org.
HDDS-18. Ozone Shell should use RestClient and RpcClient. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4db209ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4db209ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4db209ba

Branch: refs/heads/HDDS-4
Commit: 4db209baa647c49e19a9ee400ecd9d9901575ed2
Parents: d6d27ce
Author: Anu Engineer <ae...@apache.org>
Authored: Wed May 9 21:23:33 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:08 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/ozone/OzoneConsts.java    |  12 +-
 .../src/test/compose/docker-compose.yaml        |   1 +
 .../test/robotframework/acceptance/ozone.robot  |  13 +-
 .../apache/hadoop/ozone/client/ObjectStore.java |  49 +--
 .../apache/hadoop/ozone/client/OzoneBucket.java |  25 +-
 .../ozone/client/OzoneClientException.java      |  54 ++++
 .../hadoop/ozone/client/OzoneClientFactory.java |   2 +-
 .../hadoop/ozone/client/OzoneClientUtils.java   |  86 ++++++
 .../apache/hadoop/ozone/client/OzoneVolume.java |  26 +-
 .../hadoop/ozone/client/rest/RestClient.java    |   8 +-
 .../hadoop/ozone/web/client/OzoneBucket.java    |  47 +--
 .../ozone/web/client/OzoneRestClient.java       |  43 +--
 .../web/client/OzoneRestClientException.java    |  54 ----
 .../hadoop/ozone/web/client/OzoneVolume.java    |  23 +-
 .../hadoop/ozone/ozShell/TestOzoneShell.java    | 298 +++++++++++++------
 .../hadoop/ozone/web/client/TestBuckets.java    |   3 +-
 .../hadoop/ozone/web/client/TestVolume.java     |   3 +-
 .../hadoop/ozone/web/ozShell/Handler.java       |  59 +++-
 .../web/ozShell/bucket/CreateBucketHandler.java |  32 +-
 .../web/ozShell/bucket/DeleteBucketHandler.java |  20 +-
 .../web/ozShell/bucket/InfoBucketHandler.java   |  25 +-
 .../web/ozShell/bucket/ListBucketHandler.java   |  51 ++--
 .../web/ozShell/bucket/UpdateBucketHandler.java |  41 +--
 .../web/ozShell/keys/DeleteKeyHandler.java      |  26 +-
 .../ozone/web/ozShell/keys/GetKeyHandler.java   |  46 +--
 .../ozone/web/ozShell/keys/InfoKeyHandler.java  |  31 +-
 .../ozone/web/ozShell/keys/ListKeyHandler.java  |  48 +--
 .../ozone/web/ozShell/keys/PutKeyHandler.java   |  49 ++-
 .../web/ozShell/volume/CreateVolumeHandler.java |  28 +-
 .../web/ozShell/volume/DeleteVolumeHandler.java |  18 +-
 .../web/ozShell/volume/InfoVolumeHandler.java   |  38 +--
 .../web/ozShell/volume/ListVolumeHandler.java   |  58 ++--
 .../web/ozShell/volume/UpdateVolumeHandler.java |  24 +-
 .../org/apache/hadoop/fs/ozone/Constants.java   |   6 -
 .../java/org/apache/hadoop/fs/ozone/OzFs.java   |   3 +-
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java |   4 +-
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java |   3 +-
 .../fs/ozone/TestOzoneFileInterfaces.java       |   5 +-
 .../hadoop/fs/ozone/contract/OzoneContract.java |   6 +-
 39 files changed, 796 insertions(+), 572 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 2f9e469..451a08f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -58,6 +58,10 @@ public final class OzoneConsts {
   public static final String OZONE_USER = "user";
   public static final String OZONE_REQUEST = "request";
 
+  public static final String OZONE_URI_SCHEME = "o3";
+  public static final String OZONE_HTTP_SCHEME = "http";
+  public static final String OZONE_URI_DELIMITER = "/";
+
   public static final String CONTAINER_EXTENSION = ".container";
   public static final String CONTAINER_META = ".meta";
 
@@ -95,7 +99,13 @@ public final class OzoneConsts {
   /**
    * Supports Bucket Versioning.
    */
-  public enum Versioning {NOT_DEFINED, ENABLED, DISABLED}
+  public enum Versioning {
+    NOT_DEFINED, ENABLED, DISABLED;
+
+    public static Versioning getVersioning(boolean versioning) {
+      return versioning ? ENABLED : DISABLED;
+    }
+  }
 
   /**
    * Ozone handler types.
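A minimal usage sketch for the new helper, assuming only the enum shown above:

    import org.apache.hadoop.ozone.OzoneConsts.Versioning;

    public class VersioningSketch {
      public static void main(String[] args) {
        // Maps the client-side boolean flag onto the Versioning enum.
        Versioning on = Versioning.getVersioning(true);   // ENABLED
        Versioning off = Versioning.getVersioning(false); // DISABLED
        System.out.println(on + " / " + off);
      }
    }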

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
index 8350eae..da63f84 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
+++ b/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
@@ -39,6 +39,7 @@ services:
         - ./docker-config
    ksm:
       image: apache/hadoop-runner
+      hostname: ksm
       volumes:
          - ${HADOOPDIR}:/opt/hadoop
       ports:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
index ea9131e..1a9cee7 100644
--- a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
+++ b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
@@ -49,14 +49,14 @@ Test rest interface
                     Should contain      ${result}       200 OK
 
 Test ozone cli
-                    Execute on          datanode        ozone oz -createVolume http://localhost:9880/hive -user bilbo -quota 100TB -root
-    ${result} =     Execute on          datanode        ozone oz -listVolume http://localhost:9880/ -user bilbo | grep -v Removed | jq '.[] | select(.volumeName=="hive")'
+                    Execute on          datanode        ozone oz -createVolume http://ksm/hive -user bilbo -quota 100TB -root
+    ${result} =     Execute on          datanode        ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
                     Should contain      ${result}       createdOn
-                    Execute on          datanode        ozone oz -createBucket http://localhost:9880/hive/bb1
-    ${result}       Execute on          datanode        ozone oz -listBucket http://localhost:9880/hive/ | grep -v Removed | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+                    Execute on          datanode        ozone oz -createBucket http://ksm/hive/bb1
+    ${result}       Execute on          datanode        ozone oz -listBucket o3://ksm/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
                     Should Be Equal     ${result}       hive
-                    Execute on          datanode        ozone oz -deleteBucket http://localhost:9880/hive/bb1
-                    Execute on          datanode        ozone oz -deleteVolume http://localhost:9880/hive -user bilbo
+                    Execute on          datanode        ozone oz -deleteBucket http://ksm/hive/bb1
+                    Execute on          datanode        ozone oz -deleteVolume http://ksm/hive -user bilbo
 
 
 
@@ -106,6 +106,7 @@ Scale datanodes up
 Execute on
     [arguments]     ${componentname}    ${command}
     ${rc}           ${return} =         Run docker compose          exec ${componentname} ${command}
+    Log             ${return}
     [return]        ${return}
 
 Run docker compose

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
index b915213..d8b3011 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
@@ -106,25 +106,42 @@ public class ObjectStore {
    */
   public Iterator<OzoneVolume> listVolumes(String volumePrefix)
       throws IOException {
-    return new VolumeIterator(volumePrefix);
+    return listVolumes(volumePrefix, null);
   }
 
   /**
-   * Returns Iterator to iterate over the List of volumes owned by a specific
-   * user. The result can be restricted using volume prefix, will return all
-   * volumes if volume prefix is null. If user is null, returns the volume of
-   * current user.
+   * Returns Iterator to iterate over all volumes after prevVolume in the
+   * object store. If prevVolume is null it iterates from the first volume.
+   * The result can be restricted using a volume prefix; all volumes are
+   * returned if the prefix is null.
+   *
+   * @param volumePrefix Volume prefix to match
+   * @param prevVolume Volumes will be listed after this volume name
+   * @return {@code Iterator<OzoneVolume>}
+   */
+  public Iterator<OzoneVolume> listVolumes(String volumePrefix,
+      String prevVolume) throws IOException {
+    return new VolumeIterator(null, volumePrefix, prevVolume);
+  }
+
+  /**
+   * Returns Iterator to iterate over the list of volumes after prevVolume
+   * owned by a specific user. The result can be restricted using a volume
+   * prefix; all volumes are returned if the prefix is null. If user is null,
+   * the volumes of the current user are returned.
    *
    * @param user User Name
    * @param volumePrefix Volume prefix to match
+   * @param prevVolume Volumes will be listed after this volume name
    * @return {@code Iterator<OzoneVolume>}
    */
-  public Iterator<OzoneVolume> listVolumes(String user, String volumePrefix)
+  public Iterator<OzoneVolume> listVolumesByUser(String user,
+      String volumePrefix, String prevVolume)
       throws IOException {
     if(Strings.isNullOrEmpty(user)) {
       user = UserGroupInformation.getCurrentUser().getShortUserName();
     }
-    return new VolumeIterator(user, volumePrefix);
+    return new VolumeIterator(user, volumePrefix, prevVolume);
   }
 
   /**
@@ -150,25 +167,17 @@ public class ObjectStore {
     private OzoneVolume currentValue;
 
     /**
-     * Creates an Iterator to iterate over all volumes in the cluster,
-     * which matches the volume prefix.
-     * @param volPrefix prefix to match
-     */
-    VolumeIterator(String volPrefix) {
-      this(null, volPrefix);
-    }
-
-    /**
-     * Creates an Iterator to iterate over all volumes of the user,
-     * which matches volume prefix.
+     * Creates an Iterator to iterate over the user's volumes after
+     * prevVolume. If prevVolume is null it iterates from the first volume.
+     * The returned volumes match the volume prefix.
      * @param user user name
      * @param volPrefix volume prefix to match
      */
-    VolumeIterator(String user, String volPrefix) {
+    VolumeIterator(String user, String volPrefix, String prevVolume) {
       this.user = user;
       this.volPrefix = volPrefix;
       this.currentValue = null;
-      this.currentIterator = getNextListOfVolumes(null).iterator();
+      this.currentIterator = getNextListOfVolumes(prevVolume).iterator();
     }
 
     @Override
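A sketch of how a caller might use the new start-key parameter to resume a
listing (assumes an ObjectStore handle obtained elsewhere; names are
illustrative):

    import java.io.IOException;
    import java.util.Iterator;

    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.OzoneVolume;

    public class VolumeListingSketch {
      // Lists volumes matching prefix, starting after prevVolume (null = start).
      static String listAfter(ObjectStore store, String prefix,
          String prevVolume) throws IOException {
        Iterator<OzoneVolume> it = store.listVolumes(prefix, prevVolume);
        String last = prevVolume;
        while (it.hasNext()) {
          last = it.next().getName();
        }
        return last; // can be passed back in to resume a later listing
      }
    }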

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 1712979..5df0254 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -291,7 +291,21 @@ public class OzoneBucket {
    * @return {@code Iterator<OzoneKey>}
    */
   public Iterator<OzoneKey> listKeys(String keyPrefix) {
-    return new KeyIterator(keyPrefix);
+    return listKeys(keyPrefix, null);
+  }
+
+  /**
+   * Returns Iterator to iterate over all keys after prevKey in the bucket.
+   * If prevKey is null it iterates from the first key in the bucket.
+   * The result can be restricted using a key prefix; all keys are
+   * returned if the prefix is null.
+   *
+   * @param keyPrefix Key prefix to match
+   * @param prevKey Keys will be listed after this key name
+   * @return {@code Iterator<OzoneKey>}
+   */
+  public Iterator<OzoneKey> listKeys(String keyPrefix, String prevKey) {
+    return new KeyIterator(keyPrefix, prevKey);
   }
 
   /**
@@ -325,14 +339,15 @@ public class OzoneBucket {
 
 
     /**
-     * Creates an Iterator to iterate over all keys in the bucket,
-     * which matches volume prefix.
+     * Creates an Iterator to iterate over all keys after prevKey in the bucket.
+     * If prevKey is null it iterates from the first key in the bucket.
+     * The returned keys match the key prefix.
      * @param keyPrefix
      */
-    KeyIterator(String keyPrefix) {
+    KeyIterator(String keyPrefix, String prevKey) {
       this.keyPrefix = keyPrefix;
       this.currentValue = null;
-      this.currentIterator = getNextListOfKeys(null).iterator();
+      this.currentIterator = getNextListOfKeys(prevKey).iterator();
     }
 
     @Override
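And the key-side counterpart, resuming a scan from a recorded checkpoint key
(a null keyPrefix matches all keys; names are illustrative):

    import java.util.Iterator;

    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.OzoneKey;

    public class KeyScanSketch {
      // Prints every key after the checkpoint; pass null to scan from the start.
      static void scanFrom(OzoneBucket bucket, String checkpoint) {
        Iterator<OzoneKey> it = bucket.listKeys(null, checkpoint);
        while (it.hasNext()) {
          System.out.println(it.next().getName());
        }
      }
    }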

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java
new file mode 100644
index 0000000..de3116a
--- /dev/null
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.client;
+
+import org.apache.hadoop.ozone.client.rest.OzoneException;
+
+/**
+ * This exception is thrown by the Ozone Clients.
+ */
+public class OzoneClientException extends OzoneException {
+  /**
+   * Constructor that allows the shortMessage.
+   *
+   * @param shortMessage Short Message
+   */
+  public OzoneClientException(String shortMessage) {
+    super(0, shortMessage, shortMessage);
+  }
+
+  /**
+   * Constructor that allows a shortMessage and an exception.
+   *
+   * @param shortMessage short message
+   * @param ex exception
+   */
+  public OzoneClientException(String shortMessage, Exception ex) {
+    super(0, shortMessage, shortMessage, ex);
+  }
+
+  /**
+   * Constructor that allows the shortMessage and a longer message.
+   *
+   * @param shortMessage Short Message
+   * @param message long error message
+   */
+  public OzoneClientException(String shortMessage, String message) {
+    super(0, shortMessage, message);
+  }
+}
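A brief sketch of the intended use, mirroring the argument-validation call
sites converted elsewhere in this patch (the message text is illustrative):

    import org.apache.hadoop.ozone.client.OzoneClientException;

    public class ValidationSketch {
      static void requireKeyName(String keyName) throws OzoneClientException {
        if (keyName == null || keyName.isEmpty()) {
          // The 1-arg constructor reuses the short message as the long message.
          throw new OzoneClientException("Invalid key Name.");
        }
      }
    }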

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
index 6b24f2a..dae94aa 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
@@ -292,7 +292,7 @@ public final class OzoneClientFactory {
       return ctor.newInstance(config);
     } catch (Exception e) {
       final String message = "Couldn't create protocol " + protocolClass;
-      LOG.error(message + " exception:" + e);
+      LOG.error(message + " exception: ", e);
       if (e.getCause() instanceof IOException) {
         throw (IOException) e.getCause();
       } else if (e instanceof InvocationTargetException) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
new file mode 100644
index 0000000..5c83d9b
--- /dev/null
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.client;
+
+import org.apache.hadoop.hdds.client.OzoneQuota;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
+import org.apache.hadoop.ozone.client.rest.response.KeyInfo;
+import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
+import org.apache.hadoop.ozone.client.rest.response.VolumeOwner;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+
+import static org.apache.hadoop.ozone.web.utils.OzoneUtils.formatTime;
+
+/** A utility class for OzoneClient. */
+public final class OzoneClientUtils {
+
+  private OzoneClientUtils() {}
+
+  /**
+   * Returns a BucketInfo object constructed using fields of the input
+   * OzoneBucket object.
+   *
+   * @param bucket OzoneBucket instance from which BucketInfo object needs to
+   *               be created.
+   * @return BucketInfo instance
+   */
+  public static BucketInfo asBucketInfo(OzoneBucket bucket) {
+    BucketInfo bucketInfo =
+        new BucketInfo(bucket.getVolumeName(), bucket.getName());
+    bucketInfo.setCreatedOn(OzoneUtils.formatTime(bucket.getCreationTime()));
+    bucketInfo.setStorageType(bucket.getStorageType());
+    bucketInfo.setVersioning(
+        OzoneConsts.Versioning.getVersioning(bucket.getVersioning()));
+    return bucketInfo;
+  }
+
+  /**
+   * Returns a VolumeInfo object constructed using fields of the input
+   * OzoneVolume object.
+   *
+   * @param volume OzoneVolume instance from which VolumeInfo object needs to
+   *               be created.
+   * @return VolumeInfo instance
+   */
+  public static VolumeInfo asVolumeInfo(OzoneVolume volume) {
+    VolumeInfo volumeInfo =
+        new VolumeInfo(volume.getName(), formatTime(volume.getCreationTime()),
+            volume.getOwner());
+    volumeInfo.setQuota(OzoneQuota.getOzoneQuota(volume.getQuota()));
+    volumeInfo.setOwner(new VolumeOwner(volume.getOwner()));
+    return volumeInfo;
+  }
+
+  /**
+   * Returns a KeyInfo object constructed using fields of the input
+   * OzoneKey object.
+   *
+   * @param key OzoneKey instance from which KeyInfo object needs to
+   *            be created.
+   * @return KeyInfo instance
+   */
+  public static KeyInfo asKeyInfo(OzoneKey key) {
+    KeyInfo keyInfo = new KeyInfo();
+    keyInfo.setKeyName(key.getName());
+    keyInfo.setCreatedOn(formatTime(key.getCreationTime()));
+    keyInfo.setModifiedOn(formatTime(key.getModificationTime()));
+    keyInfo.setSize(key.getDataSize());
+    return keyInfo;
+  }
+}
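A sketch of how a shell handler might use these converters, returning the
REST-style response shapes for printing (only methods defined above are used):

    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.OzoneClientUtils;
    import org.apache.hadoop.ozone.client.OzoneKey;
    import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
    import org.apache.hadoop.ozone.client.rest.response.KeyInfo;

    public class ConverterSketch {
      static BucketInfo describe(OzoneBucket bucket) {
        // Convert the client-side bucket into the REST-facing response object.
        return OzoneClientUtils.asBucketInfo(bucket);
      }

      static KeyInfo describe(OzoneKey key) {
        return OzoneClientUtils.asKeyInfo(key);
      }
    }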

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
index c4e7331..4601f1a 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
@@ -222,7 +222,22 @@ public class OzoneVolume {
    * @return {@code Iterator<OzoneBucket>}
    */
   public Iterator<OzoneBucket> listBuckets(String bucketPrefix) {
-    return new BucketIterator(bucketPrefix);
+    return listBuckets(bucketPrefix, null);
+  }
+
+  /**
+   * Returns Iterator to iterate over all buckets after prevBucket in the volume.
+   * If prevBucket is null it iterates from the first bucket in the volume.
+   * The result can be restricted using a bucket prefix; all buckets are
+   * returned if the prefix is null.
+   *
+   * @param bucketPrefix Bucket prefix to match
+   * @param prevBucket Buckets are listed after this bucket
+   * @return {@code Iterator<OzoneBucket>}
+   */
+  public Iterator<OzoneBucket> listBuckets(String bucketPrefix,
+      String prevBucket) {
+    return new BucketIterator(bucketPrefix, prevBucket);
   }
 
   /**
@@ -250,14 +265,15 @@ public class OzoneVolume {
 
 
     /**
-     * Creates an Iterator to iterate over all buckets in the volume,
-     * which matches volume prefix.
+     * Creates an Iterator to iterate over all buckets after prevBucket in the volume.
+     * If prevBucket is null it iterates from the first bucket in the volume.
+     * The returned buckets match the bucket prefix.
      * @param bucketPrefix
      */
-    BucketIterator(String bucketPrefix) {
+    public BucketIterator(String bucketPrefix, String prevBucket) {
       this.bucketPrefix = bucketPrefix;
       this.currentValue = null;
-      this.currentIterator = getNextListOfBuckets(null).iterator();
+      this.currentIterator = getNextListOfBuckets(prevBucket).iterator();
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
index e9885d1..1fd2091 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.ozone.client.rest.headers.Header;
 import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
 import org.apache.hadoop.ozone.client.rest.response.KeyInfo;
 import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
 import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
 import org.apache.hadoop.ozone.protocol.proto
@@ -94,7 +93,7 @@ import static java.net.HttpURLConnection.HTTP_OK;
 public class RestClient implements ClientProtocol {
 
   private static final String PATH_SEPARATOR = "/";
-  private static final Logger LOG = LoggerFactory.getLogger(RpcClient.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RestClient.class);
 
   private final Configuration conf;
   private final URI ozoneRestUri;
@@ -195,8 +194,9 @@ public class RestClient implements ClientProtocol {
 
       ServiceInfo restServer = selector.getRestServer(dataNodeInfos);
 
-      return NetUtils.createSocketAddr(restServer.getHostname() + ":" +
-          restServer.getPort(ServicePort.Type.HTTP));
+      return NetUtils.createSocketAddr(
+          NetUtils.normalizeHostName(restServer.getHostname()) + ":"
+              + restServer.getPort(ServicePort.Type.HTTP));
     } finally {
       EntityUtils.consume(entity);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
index e6fe0ec..3183d03 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.io.IOUtils;
 
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.client.rest.headers.Header;
 import org.apache.hadoop.ozone.OzoneAcl;
@@ -169,11 +170,11 @@ public class OzoneBucket {
    */
   public void putKey(String keyName, String data) throws OzoneException {
     if ((keyName == null) || keyName.isEmpty()) {
-      throw new OzoneRestClientException("Invalid key Name.");
+      throw new OzoneClientException("Invalid key Name.");
     }
 
     if (data == null) {
-      throw new OzoneRestClientException("Invalid data.");
+      throw new OzoneClientException("Invalid data.");
     }
 
     HttpPut putRequest = null;
@@ -195,7 +196,7 @@ public class OzoneBucket {
       }
       executePutKey(putRequest, httpClient);
     } catch (IOException | URISyntaxException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       IOUtils.closeStream(is);
       releaseConnection(putRequest);
@@ -211,7 +212,7 @@ public class OzoneBucket {
    */
   public void putKey(File dataFile) throws OzoneException {
     if (dataFile == null) {
-      throw new OzoneRestClientException("Invalid file object.");
+      throw new OzoneClientException("Invalid file object.");
     }
     String keyName = dataFile.getName();
     putKey(keyName, dataFile);
@@ -228,11 +229,11 @@ public class OzoneBucket {
       throws OzoneException {
 
     if ((keyName == null) || keyName.isEmpty()) {
-      throw new OzoneRestClientException("Invalid key Name");
+      throw new OzoneClientException("Invalid key Name");
     }
 
     if (file == null) {
-      throw new OzoneRestClientException("Invalid data stream");
+      throw new OzoneClientException("Invalid data stream");
     }
 
     HttpPut putRequest = null;
@@ -253,7 +254,7 @@ public class OzoneBucket {
       executePutKey(putRequest, httpClient);
 
     } catch (IOException | URISyntaxException ex) {
-      final OzoneRestClientException orce = new OzoneRestClientException(
+      final OzoneClientException orce = new OzoneClientException(
           "Failed to putKey: keyName=" + keyName + ", file=" + file);
       orce.initCause(ex);
       LOG.trace("", orce);
@@ -285,7 +286,7 @@ public class OzoneBucket {
       }
 
       if (entity == null) {
-        throw new OzoneRestClientException("Unexpected null in http payload");
+        throw new OzoneClientException("Unexpected null in http payload");
       }
 
       throw OzoneException.parse(EntityUtils.toString(entity));
@@ -306,11 +307,11 @@ public class OzoneBucket {
   public void getKey(String keyName, Path downloadTo) throws OzoneException {
 
     if ((keyName == null) || keyName.isEmpty()) {
-      throw new OzoneRestClientException("Invalid key Name");
+      throw new OzoneClientException("Invalid key Name");
     }
 
     if (downloadTo == null) {
-      throw new OzoneRestClientException("Invalid download path");
+      throw new OzoneClientException("Invalid download path");
     }
 
     FileOutputStream outPutFile = null;
@@ -326,7 +327,7 @@ public class OzoneBucket {
       executeGetKey(getRequest, httpClient, outPutFile);
       outPutFile.flush();
     } catch (IOException | URISyntaxException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       IOUtils.closeStream(outPutFile);
       releaseConnection(getRequest);
@@ -343,7 +344,7 @@ public class OzoneBucket {
   public String getKey(String keyName) throws OzoneException {
 
     if ((keyName == null) || keyName.isEmpty()) {
-      throw new OzoneRestClientException("Invalid key Name");
+      throw new OzoneClientException("Invalid key Name");
     }
 
     HttpGet getRequest = null;
@@ -360,7 +361,7 @@ public class OzoneBucket {
       executeGetKey(getRequest, httpClient, outPutStream);
       return outPutStream.toString(ENCODING_NAME);
     } catch (IOException | URISyntaxException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       IOUtils.closeStream(outPutStream);
       releaseConnection(getRequest);
@@ -394,7 +395,7 @@ public class OzoneBucket {
       }
 
       if (entity == null) {
-        throw new OzoneRestClientException("Unexpected null in http payload");
+        throw new OzoneClientException("Unexpected null in http payload");
       }
 
       throw OzoneException.parse(EntityUtils.toString(entity));
@@ -414,7 +415,7 @@ public class OzoneBucket {
   public void deleteKey(String keyName) throws OzoneException {
 
     if ((keyName == null) || keyName.isEmpty()) {
-      throw new OzoneRestClientException("Invalid key Name");
+      throw new OzoneClientException("Invalid key Name");
     }
 
     HttpDelete deleteRequest = null;
@@ -427,7 +428,7 @@ public class OzoneBucket {
           .getClient().getHttpDelete(builder.toString());
       executeDeleteKey(deleteRequest, httpClient);
     } catch (IOException | URISyntaxException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       releaseConnection(deleteRequest);
     }
@@ -457,7 +458,7 @@ public class OzoneBucket {
       }
 
       if (entity == null) {
-        throw new OzoneRestClientException("Unexpected null in http payload");
+        throw new OzoneClientException("Unexpected null in http payload");
       }
 
       throw OzoneException.parse(EntityUtils.toString(entity));
@@ -505,7 +506,7 @@ public class OzoneBucket {
       return executeListKeys(getRequest, httpClient);
 
     } catch (IOException | URISyntaxException e) {
-      throw new OzoneRestClientException(e.getMessage(), e);
+      throw new OzoneClientException(e.getMessage(), e);
     } finally {
       releaseConnection(getRequest);
     }
@@ -534,7 +535,7 @@ public class OzoneBucket {
       getRequest = client.getHttpGet(uri.toString());
       return executeListKeys(getRequest, HttpClientBuilder.create().build());
     } catch (IOException | URISyntaxException e) {
-      throw new OzoneRestClientException(e.getMessage());
+      throw new OzoneClientException(e.getMessage());
     } finally {
       releaseConnection(getRequest);
     }
@@ -560,7 +561,7 @@ public class OzoneBucket {
       entity = response.getEntity();
 
       if (entity == null) {
-        throw new OzoneRestClientException("Unexpected null in http payload");
+        throw new OzoneClientException("Unexpected null in http payload");
       }
       if (errorCode == HTTP_OK) {
         String temp = EntityUtils.toString(entity);
@@ -586,7 +587,7 @@ public class OzoneBucket {
    */
   public OzoneKey getKeyInfo(String keyName) throws OzoneException {
     if ((keyName == null) || keyName.isEmpty()) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Unable to get key info, key name is null or empty");
     }
 
@@ -604,7 +605,7 @@ public class OzoneBucket {
       getRequest = client.getHttpGet(builder.toString());
       return executeGetKeyInfo(getRequest, httpClient);
     } catch (IOException | URISyntaxException e) {
-      throw new OzoneRestClientException(e.getMessage(), e);
+      throw new OzoneClientException(e.getMessage(), e);
     } finally {
       releaseConnection(getRequest);
     }
@@ -627,7 +628,7 @@ public class OzoneBucket {
       int errorCode = response.getStatusLine().getStatusCode();
       entity = response.getEntity();
       if (entity == null) {
-        throw new OzoneRestClientException("Unexpected null in http payload");
+        throw new OzoneClientException("Unexpected null in http payload");
       }
 
       if (errorCode == HTTP_OK) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
index 6d0bbf4..8373f67 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
@@ -24,6 +24,7 @@ import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.client.rest.headers.Header;
 import org.apache.hadoop.ozone.web.response.ListVolumes;
@@ -111,7 +112,7 @@ public class OzoneRestClient implements Closeable {
    */
   public void setEndPointURI(URI endPointURI) throws OzoneException {
     if ((endPointURI == null) || (endPointURI.toString().isEmpty())) {
-      throw new OzoneRestClientException("Invalid ozone URI");
+      throw new OzoneClientException("Invalid ozone URI");
     }
     this.endPointURI = endPointURI;
   }
@@ -151,7 +152,7 @@ public class OzoneRestClient implements Closeable {
    * @param onBehalfOf - The user on behalf we are making the call for
    * @param quota      - Quota's are specified in a specific format. it is
    *                   integer(MB|GB|TB), for example 100TB.
-   * @throws OzoneRestClientException
+   * @throws OzoneClientException
    */
   public OzoneVolume createVolume(String volumeName, String onBehalfOf,
                                   String quota) throws OzoneException {
@@ -169,7 +170,7 @@ public class OzoneRestClient implements Closeable {
       executeCreateVolume(httpPost, httpClient);
       return getVolume(volumeName);
     } catch (IOException | URISyntaxException | IllegalArgumentException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       releaseConnection(httpPost);
     }
@@ -196,7 +197,7 @@ public class OzoneRestClient implements Closeable {
       httpGet = getHttpGet(builder.toString());
       return executeInfoVolume(httpGet, httpClient);
     } catch (IOException | URISyntaxException | IllegalArgumentException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       releaseConnection(httpGet);
     }
@@ -247,7 +248,7 @@ public class OzoneRestClient implements Closeable {
       }
       return executeListVolume(httpGet, httpClient);
     } catch (IOException | URISyntaxException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       releaseConnection(httpGet);
     }
@@ -329,7 +330,7 @@ public class OzoneRestClient implements Closeable {
       return executeListVolume(httpGet, httpClient);
 
     } catch (IOException | URISyntaxException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       releaseConnection(httpGet);
     }
@@ -351,7 +352,7 @@ public class OzoneRestClient implements Closeable {
       httpDelete = getHttpDelete(builder.toString());
       executeDeleteVolume(httpDelete, httpClient);
     } catch (IOException | URISyntaxException | IllegalArgumentException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       releaseConnection(httpDelete);
     }
@@ -368,7 +369,7 @@ public class OzoneRestClient implements Closeable {
       throws OzoneException {
     HttpPut putRequest = null;
     if (newOwner == null || newOwner.isEmpty()) {
-      throw new OzoneRestClientException("Invalid new owner name");
+      throw new OzoneClientException("Invalid new owner name");
     }
     try (CloseableHttpClient httpClient = newHttpClient()) {
       OzoneUtils.verifyResourceName(volumeName);
@@ -380,7 +381,7 @@ public class OzoneRestClient implements Closeable {
       executePutVolume(putRequest, httpClient);
 
     } catch (URISyntaxException | IllegalArgumentException | IOException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       releaseConnection(putRequest);
     }
@@ -399,7 +400,7 @@ public class OzoneRestClient implements Closeable {
   public void setVolumeQuota(String volumeName, String quota)
       throws OzoneException {
     if (quota == null || quota.isEmpty()) {
-      throw new OzoneRestClientException("Invalid quota");
+      throw new OzoneClientException("Invalid quota");
     }
     HttpPut putRequest = null;
     try (CloseableHttpClient httpClient = newHttpClient()) {
@@ -413,7 +414,7 @@ public class OzoneRestClient implements Closeable {
       executePutVolume(putRequest, httpClient);
 
     } catch (URISyntaxException | IllegalArgumentException | IOException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       releaseConnection(putRequest);
     }
@@ -443,7 +444,7 @@ public class OzoneRestClient implements Closeable {
       if (entity != null) {
         throw OzoneException.parse(EntityUtils.toString(entity));
       } else {
-        throw new OzoneRestClientException("Unexpected null in http payload");
+        throw new OzoneClientException("Unexpected null in http payload");
       }
     } finally {
       if (entity != null) {
@@ -470,7 +471,7 @@ public class OzoneRestClient implements Closeable {
 
       entity = response.getEntity();
       if (entity == null) {
-        throw new OzoneRestClientException("Unexpected null in http payload");
+        throw new OzoneClientException("Unexpected null in http payload");
       }
 
       if (errorCode == HTTP_OK) {
@@ -531,7 +532,7 @@ public class OzoneRestClient implements Closeable {
       entity = response.getEntity();
 
       if (entity == null) {
-        throw new OzoneRestClientException("Unexpected null in http payload");
+        throw new OzoneClientException("Unexpected null in http payload");
       }
 
       String temp = EntityUtils.toString(entity);
@@ -595,11 +596,11 @@ public class OzoneRestClient implements Closeable {
     OzoneUtils.verifyResourceName(bucketName);
 
     if (StringUtils.isEmpty(keyName)) {
-      throw new OzoneRestClientException("Invalid key Name");
+      throw new OzoneClientException("Invalid key Name");
     }
 
     if (file == null) {
-      throw new OzoneRestClientException("Invalid data stream");
+      throw new OzoneClientException("Invalid data stream");
     }
 
     HttpPut putRequest = null;
@@ -619,7 +620,7 @@ public class OzoneRestClient implements Closeable {
       putRequest.setHeader(Header.CONTENT_MD5, DigestUtils.md5Hex(fis));
       OzoneBucket.executePutKey(putRequest, httpClient);
     } catch (IOException | URISyntaxException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       IOUtils.closeStream(fis);
       releaseConnection(putRequest);
@@ -641,11 +642,11 @@ public class OzoneRestClient implements Closeable {
     OzoneUtils.verifyResourceName(bucketName);
 
     if (StringUtils.isEmpty(keyName)) {
-      throw new OzoneRestClientException("Invalid key Name");
+      throw new OzoneClientException("Invalid key Name");
     }
 
     if (downloadTo == null) {
-      throw new OzoneRestClientException("Invalid download path");
+      throw new OzoneClientException("Invalid download path");
     }
 
     FileOutputStream outPutFile = null;
@@ -661,7 +662,7 @@ public class OzoneRestClient implements Closeable {
       OzoneBucket.executeGetKey(getRequest, httpClient, outPutFile);
       outPutFile.flush();
     } catch (IOException | URISyntaxException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       IOUtils.closeStream(outPutFile);
       releaseConnection(getRequest);
@@ -706,7 +707,7 @@ public class OzoneRestClient implements Closeable {
       getRequest = getHttpGet(builder.toString());
       return OzoneBucket.executeListKeys(getRequest, httpClient);
     } catch (IOException | URISyntaxException e) {
-      throw new OzoneRestClientException(e.getMessage(), e);
+      throw new OzoneClientException(e.getMessage(), e);
     } finally {
       releaseConnection(getRequest);
     }
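
For orientation, a usage sketch of the REST client API being migrated above. The constructor, setUserAuth(), createVolume(), and setVolumeQuota() signatures are taken from these hunks and from the pre-refactor TestOzoneShell code further down; the endpoint URL and names are illustrative, and quota strings follow the integer(MB|GB|TB) format noted in the javadoc:

    // Usage sketch, inside a method that declares "throws OzoneException".
    // The endpoint, user, and volume name are illustrative.
    OzoneRestClient client = new OzoneRestClient("http://localhost:9864");
    client.setUserAuth("hdfs");
    OzoneVolume vol = client.createVolume("vol-example", "bilbo", "100TB");
    client.setVolumeQuota("vol-example", "500MB");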

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClientException.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClientException.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClientException.java
deleted file mode 100644
index dfb2357..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClientException.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.client;
-
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-
-/**
- * This exception is thrown by the Ozone Clients.
- */
-public class OzoneRestClientException extends OzoneException {
-  /**
-   * Constructor that allows the shortMessage.
-   *
-   * @param shortMessage Short Message
-   */
-  public OzoneRestClientException(String shortMessage) {
-    super(0, shortMessage, shortMessage);
-  }
-
-  /**
-   * Constructor that allows a shortMessage and an exception.
-   *
-   * @param shortMessage short message
-   * @param ex exception
-   */
-  public OzoneRestClientException(String shortMessage, Exception ex) {
-    super(0, shortMessage, shortMessage, ex);
-  }
-
-  /**
-   * Constructor that allows the shortMessage and a longer message.
-   *
-   * @param shortMessage Short Message
-   * @param message long error message
-   */
-  public OzoneRestClientException(String shortMessage, String message) {
-    super(0, shortMessage, message);
-  }
-}
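
With the class above deleted, every former call site now targets org.apache.hadoop.ozone.client.OzoneClientException. Judging purely from the constructors exercised in this patch, the replacement must offer at least the surface below; this is an inference from usage, not the actual class body, and the superclass shown is an assumption mirroring the deleted class:

    // Assumed minimal surface of the replacement, inferred from the call
    // sites in this patch; not the real OzoneClientException source.
    public class OzoneClientException extends OzoneException {
      public OzoneClientException(String shortMessage) {
        super(0, shortMessage, shortMessage);
      }
      public OzoneClientException(String shortMessage, Exception ex) {
        super(0, shortMessage, shortMessage, ex);
      }
    }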

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java
index 6728e68..9d3831c 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java
@@ -22,6 +22,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Strings;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.client.rest.headers.Header;
 import org.apache.hadoop.ozone.web.request.OzoneQuota;
@@ -203,7 +204,7 @@ public class OzoneVolume {
       executeCreateBucket(httpPost, httpClient);
       return getBucket(bucketName);
     } catch (IOException | URISyntaxException | IllegalArgumentException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       releaseConnection(httpPost);
     }
@@ -276,7 +277,7 @@ public class OzoneVolume {
       if (entity != null) {
         throw OzoneException.parse(EntityUtils.toString(entity));
       } else {
-        throw new OzoneRestClientException("Unexpected null in http payload");
+        throw new OzoneClientException("Unexpected null in http payload");
       }
     } finally {
       if (entity != null) {
@@ -307,7 +308,7 @@ public class OzoneVolume {
       }
       executePutBucket(putRequest, httpClient);
     } catch (URISyntaxException | IOException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       releaseConnection(putRequest);
     }
@@ -336,7 +337,7 @@ public class OzoneVolume {
       }
       executePutBucket(putRequest, httpClient);
     } catch (URISyntaxException | IOException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       releaseConnection(putRequest);
     }
@@ -361,7 +362,7 @@ public class OzoneVolume {
       return executeInfoBucket(getRequest, httpClient);
 
     } catch (IOException | URISyntaxException | IllegalArgumentException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       releaseConnection(getRequest);
     }
@@ -388,7 +389,7 @@ public class OzoneVolume {
       int errorCode = response.getStatusLine().getStatusCode();
       entity = response.getEntity();
       if (entity == null) {
-        throw new OzoneRestClientException("Unexpected null in http payload");
+        throw new OzoneClientException("Unexpected null in http payload");
       }
       if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) {
         OzoneBucket bucket =
@@ -432,7 +433,7 @@ public class OzoneVolume {
         throw OzoneException.parse(EntityUtils.toString(entity));
       }
 
-      throw new OzoneRestClientException("Unexpected null in http result");
+      throw new OzoneClientException("Unexpected null in http result");
     } finally {
       if (entity != null) {
         EntityUtils.consumeQuietly(entity);
@@ -467,7 +468,7 @@ public class OzoneVolume {
       return executeListBuckets(getRequest, httpClient);
 
     } catch (IOException | URISyntaxException e) {
-      throw new OzoneRestClientException(e.getMessage(), e);
+      throw new OzoneClientException(e.getMessage(), e);
     } finally {
       releaseConnection(getRequest);
     }
@@ -496,7 +497,7 @@ public class OzoneVolume {
       entity = response.getEntity();
 
       if (entity == null) {
-        throw new OzoneRestClientException("Unexpected null in http payload");
+        throw new OzoneClientException("Unexpected null in http payload");
       }
       if (errorCode == HTTP_OK) {
         ListBuckets bucketList =
@@ -535,7 +536,7 @@ public class OzoneVolume {
       executeDeleteBucket(delRequest, httpClient);
 
     } catch (IOException | URISyntaxException | IllegalArgumentException ex) {
-      throw new OzoneRestClientException(ex.getMessage(), ex);
+      throw new OzoneClientException(ex.getMessage(), ex);
     } finally {
       releaseConnection(delRequest);
     }
@@ -564,7 +565,7 @@ public class OzoneVolume {
       }
 
       if (entity == null) {
-        throw new OzoneRestClientException("Unexpected null in http payload.");
+        throw new OzoneClientException("Unexpected null in http payload.");
       }
 
       throw OzoneException.parse(EntityUtils.toString(entity));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 31046f5..8ffe67d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.ozShell;
 
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -26,15 +27,21 @@ import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
+import java.io.IOException;
 import java.io.PrintStream;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Random;
 import java.util.UUID;
+import java.util.stream.Collectors;
 
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneAcl;
@@ -43,11 +50,16 @@ import org.apache.hadoop.ozone.OzoneAcl.OzoneACLType;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.web.client.OzoneBucket;
-import org.apache.hadoop.ozone.web.client.OzoneKey;
-import org.apache.hadoop.ozone.web.client.OzoneRestClient;
-import org.apache.hadoop.ozone.web.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.VolumeArgs;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
+import org.apache.hadoop.ozone.client.rest.RestClient;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
+import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.apache.hadoop.ozone.web.response.BucketInfo;
@@ -63,12 +75,20 @@ import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This test class specified for testing Ozone shell command.
  */
+@RunWith(value = Parameterized.class)
 public class TestOzoneShell {
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestOzoneShell.class);
+
   /**
    * Set the timeout for every test.
    */
@@ -79,7 +99,7 @@ public class TestOzoneShell {
   private static File baseDir;
   private static OzoneConfiguration conf = null;
   private static MiniOzoneCluster cluster = null;
-  private static OzoneRestClient client = null;
+  private static ClientProtocol client = null;
   private static Shell shell = null;
 
   private final ByteArrayOutputStream out = new ByteArrayOutputStream();
@@ -87,6 +107,16 @@ public class TestOzoneShell {
   private static final PrintStream OLD_OUT = System.out;
   private static final PrintStream OLD_ERR = System.err;
 
+  @Parameterized.Parameters
+  public static Collection<Object[]> clientProtocol() {
+    Object[][] params = new Object[][] {
+        {RpcClient.class},
+        {RestClient.class}};
+    return Arrays.asList(params);
+  }
+
+  @Parameterized.Parameter
+  public Class clientProtocol;
   /**
    * Create a MiniDFSCluster for testing with using distributed Ozone
    * handler type.
@@ -110,13 +140,12 @@ public class TestOzoneShell {
     shell = new Shell();
     shell.setConf(conf);
 
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(3)
+        .build();
+    conf.setInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue());
+    client = new RpcClient(conf);
     cluster.waitForClusterToBeReady();
-    final int port = cluster.getHddsDatanodes().get(0).getDatanodeDetails()
-        .getOzoneRestPort();
-    url = String.format("http://localhost:%d", port);
-    client = new OzoneRestClient(String.format("http://localhost:%d", port));
-    client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
   }
 
   /**
@@ -137,6 +166,26 @@ public class TestOzoneShell {
   public void setup() {
     System.setOut(new PrintStream(out));
     System.setErr(new PrintStream(err));
+    if(clientProtocol.equals(RestClient.class)) {
+      String hostName = cluster.getKeySpaceManager().getHttpServer()
+          .getHttpAddress().getHostName();
+      int port = cluster
+          .getKeySpaceManager().getHttpServer().getHttpAddress().getPort();
+      url = String.format("http://" + hostName + ":" + port);
+    } else {
+      List<ServiceInfo> services = null;
+      try {
+        services = cluster.getKeySpaceManager().getServiceList();
+      } catch (IOException e) {
+        LOG.error("Could not get service list from KSM");
+      }
+      String hostName = services.stream().filter(
+          a -> a.getNodeType().equals(HddsProtos.NodeType.KSM))
+          .collect(Collectors.toList()).get(0).getHostname();
+
+      String port = cluster.getKeySpaceManager().getRpcPort();
+      url = String.format("o3://" + hostName + ":" + port);
+    }
   }
 
   @After
@@ -152,22 +201,29 @@ public class TestOzoneShell {
 
   @Test
   public void testCreateVolume() throws Exception {
+    LOG.info("Running testCreateVolume");
     String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
     String userName = "bilbo";
     String[] args = new String[] {"-createVolume", url + "/" + volumeName,
         "-user", userName, "-root"};
 
     assertEquals(0, ToolRunner.run(shell, args));
-    OzoneVolume volumeInfo = client.getVolume(volumeName);
-    assertEquals(volumeName, volumeInfo.getVolumeName());
-    assertEquals(userName, volumeInfo.getOwnerName());
+    OzoneVolume volumeInfo = client.getVolumeDetails(volumeName);
+    assertEquals(volumeName, volumeInfo.getName());
+    assertEquals(userName, volumeInfo.getOwner());
   }
 
   @Test
   public void testDeleteVolume() throws Exception {
+    LOG.info("Running testDeleteVolume");
     String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    OzoneVolume vol = client.createVolume(volumeName, "bilbo", "100TB");
-    assertNotNull(vol);
+    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+        .setOwner("bilbo")
+        .setQuota("100TB")
+        .build();
+    client.createVolume(volumeName, volumeArgs);
+    OzoneVolume volume = client.getVolumeDetails(volumeName);
+    assertNotNull(volume);
 
     String[] args = new String[] {"-deleteVolume", url + "/" + volumeName,
         "-root"};
@@ -175,9 +231,9 @@ public class TestOzoneShell {
 
     // verify if volume has been deleted
     try {
-      client.getVolume(volumeName);
+      client.getVolumeDetails(volumeName);
       fail("Get volume call should have thrown.");
-    } catch (OzoneException e) {
+    } catch (IOException e) {
       GenericTestUtils.assertExceptionContains(
           "Info Volume failed, error:VOLUME_NOT_FOUND", e);
     }
@@ -185,8 +241,13 @@ public class TestOzoneShell {
 
   @Test
   public void testInfoVolume() throws Exception {
+    LOG.info("Running testInfoVolume");
     String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    client.createVolume(volumeName, "bilbo", "100TB");
+    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+        .setOwner("bilbo")
+        .setQuota("100TB")
+        .build();
+    client.createVolume(volumeName, volumeArgs);
 
     String[] args = new String[] {"-infoVolume", url + "/" + volumeName,
         "-root"};
@@ -206,45 +267,53 @@ public class TestOzoneShell {
 
   @Test
   public void testUpdateVolume() throws Exception {
+    LOG.info("Running testUpdateVolume");
     String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
     String userName = "bilbo";
-    OzoneVolume vol = client.createVolume(volumeName, userName, "100TB");
-    assertEquals(userName, vol.getOwnerName());
-    assertEquals(100, vol.getQuota().getSize(), 100);
-    assertEquals(OzoneQuota.Units.TB, vol.getQuota().getUnit());
+    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+        .setOwner("bilbo")
+        .setQuota("100TB")
+        .build();
+    client.createVolume(volumeName, volumeArgs);
+    OzoneVolume vol = client.getVolumeDetails(volumeName);
+    assertEquals(userName, vol.getOwner());
+    assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(), vol.getQuota());
 
     String[] args = new String[] {"-updateVolume", url + "/" + volumeName,
         "-quota", "500MB", "-root"};
     assertEquals(0, ToolRunner.run(shell, args));
-    vol = client.getVolume(volumeName);
-    assertEquals(userName, vol.getOwnerName());
-    assertEquals(500, vol.getQuota().getSize(), 500);
-    assertEquals(OzoneQuota.Units.MB, vol.getQuota().getUnit());
+    vol = client.getVolumeDetails(volumeName);
+    assertEquals(userName, vol.getOwner());
+    assertEquals(OzoneQuota.parseQuota("500MB").sizeInBytes(), vol.getQuota());
 
     String newUser = "new-user";
     args = new String[] {"-updateVolume", url + "/" + volumeName,
         "-user", newUser, "-root"};
     assertEquals(0, ToolRunner.run(shell, args));
-    vol = client.getVolume(volumeName);
-    assertEquals(newUser, vol.getOwnerName());
+    vol = client.getVolumeDetails(volumeName);
+    assertEquals(newUser, vol.getOwner());
 
     // test error conditions
     args = new String[] {"-updateVolume", url + "/invalid-volume",
         "-user", newUser, "-root"};
     assertEquals(1, ToolRunner.run(shell, args));
     assertTrue(err.toString().contains(
-        "Volume owner change failed, error:VOLUME_NOT_FOUND"));
+        "Info Volume failed, error:VOLUME_NOT_FOUND"));
 
     err.reset();
     args = new String[] {"-updateVolume", url + "/invalid-volume",
         "-quota", "500MB", "-root"};
     assertEquals(1, ToolRunner.run(shell, args));
     assertTrue(err.toString().contains(
-        "Volume quota change failed, error:VOLUME_NOT_FOUND"));
+        "Info Volume failed, error:VOLUME_NOT_FOUND"));
   }
 
   @Test
   public void testListVolume() throws Exception {
+    LOG.info("Running testListVolume");
+    if (clientProtocol.equals(RestClient.class)) {
+      return;
+    }
     String commandOutput;
     List<VolumeInfo> volumes;
     final int volCount = 20;
@@ -265,7 +334,12 @@ public class TestOzoneShell {
         userName = user2;
         volumeName = "test-vol" + x;
       }
-      OzoneVolume vol = client.createVolume(volumeName, userName, "100TB");
+      VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+          .setOwner(userName)
+          .setQuota("100TB")
+          .build();
+      client.createVolume(volumeName, volumeArgs);
+      OzoneVolume vol = client.getVolumeDetails(volumeName);
       assertNotNull(vol);
     }
 
@@ -343,16 +417,17 @@ public class TestOzoneShell {
 
   @Test
   public void testCreateBucket() throws Exception {
+    LOG.info("Running testCreateBucket");
     OzoneVolume vol = creatVolume();
     String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
     String[] args = new String[] {"-createBucket",
-        url + "/" + vol.getVolumeName() + "/" + bucketName};
+        url + "/" + vol.getName() + "/" + bucketName};
 
     assertEquals(0, ToolRunner.run(shell, args));
     OzoneBucket bucketInfo = vol.getBucket(bucketName);
-    assertEquals(vol.getVolumeName(),
-        bucketInfo.getBucketInfo().getVolumeName());
-    assertEquals(bucketName, bucketInfo.getBucketName());
+    assertEquals(vol.getName(),
+        bucketInfo.getVolumeName());
+    assertEquals(bucketName, bucketInfo.getName());
 
     // test create a bucket in a non-exist volume
     args = new String[] {"-createBucket",
@@ -365,20 +440,22 @@ public class TestOzoneShell {
 
   @Test
   public void testDeleteBucket() throws Exception {
+    LOG.info("Running testDeleteBucket");
     OzoneVolume vol = creatVolume();
     String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    OzoneBucket bucketInfo = vol.createBucket(bucketName);
+    vol.createBucket(bucketName);
+    OzoneBucket bucketInfo = vol.getBucket(bucketName);
     assertNotNull(bucketInfo);
 
     String[] args = new String[] {"-deleteBucket",
-        url + "/" + vol.getVolumeName() + "/" + bucketName};
+        url + "/" + vol.getName() + "/" + bucketName};
     assertEquals(0, ToolRunner.run(shell, args));
 
     // verify if bucket has been deleted in volume
     try {
       vol.getBucket(bucketName);
       fail("Get bucket should have thrown.");
-    } catch (OzoneException e) {
+    } catch (IOException e) {
       GenericTestUtils.assertExceptionContains(
           "Info Bucket failed, error: BUCKET_NOT_FOUND", e);
     }
@@ -393,7 +470,7 @@ public class TestOzoneShell {
     err.reset();
     // test delete non-exist bucket
     args = new String[] {"-deleteBucket",
-        url + "/" + vol.getVolumeName() + "/invalid-bucket"};
+        url + "/" + vol.getName() + "/invalid-bucket"};
     assertEquals(1, ToolRunner.run(shell, args));
     assertTrue(err.toString().contains(
         "Delete Bucket failed, error:BUCKET_NOT_FOUND"));
@@ -401,12 +478,13 @@ public class TestOzoneShell {
 
   @Test
   public void testInfoBucket() throws Exception {
+    LOG.info("Running testInfoBucket");
     OzoneVolume vol = creatVolume();
     String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
     vol.createBucket(bucketName);
 
     String[] args = new String[] {"-infoBucket",
-        url + "/" + vol.getVolumeName() + "/" + bucketName};
+        url + "/" + vol.getName() + "/" + bucketName};
     assertEquals(0, ToolRunner.run(shell, args));
 
     String output = out.toString();
@@ -416,7 +494,7 @@ public class TestOzoneShell {
 
     // test get info from a non-exist bucket
     args = new String[] {"-infoBucket",
-        url + "/" + vol.getVolumeName() + "/invalid-bucket" + bucketName};
+        url + "/" + vol.getName() + "/invalid-bucket" + bucketName};
     assertEquals(1, ToolRunner.run(shell, args));
     assertTrue(err.toString().contains(
         "Info Bucket failed, error: BUCKET_NOT_FOUND"));
@@ -424,13 +502,15 @@ public class TestOzoneShell {
 
   @Test
   public void testUpdateBucket() throws Exception {
+    LOG.info("Running testUpdateBucket");
     OzoneVolume vol = creatVolume();
     String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    OzoneBucket bucket = vol.createBucket(bucketName);
-    assertEquals(0, bucket.getAcls().size());
+    vol.createBucket(bucketName);
+    OzoneBucket bucket = vol.getBucket(bucketName);
+    int aclSize = bucket.getAcls().size();
 
     String[] args = new String[] {"-updateBucket",
-        url + "/" + vol.getVolumeName() + "/" + bucketName, "-addAcl",
+        url + "/" + vol.getName() + "/" + bucketName, "-addAcl",
         "user:frodo:rw,group:samwise:r"};
     assertEquals(0, ToolRunner.run(shell, args));
     String output = out.toString();
@@ -438,36 +518,40 @@ public class TestOzoneShell {
         && output.contains(OzoneConsts.OZONE_TIME_ZONE));
 
     bucket = vol.getBucket(bucketName);
-    assertEquals(2, bucket.getAcls().size());
+    assertEquals(2 + aclSize, bucket.getAcls().size());
 
-    OzoneAcl acl = bucket.getAcls().get(0);
+    OzoneAcl acl = bucket.getAcls().get(aclSize);
     assertTrue(acl.getName().equals("frodo")
         && acl.getType() == OzoneACLType.USER
         && acl.getRights()== OzoneACLRights.READ_WRITE);
 
     args = new String[] {"-updateBucket",
-        url + "/" + vol.getVolumeName() + "/" + bucketName, "-removeAcl",
+        url + "/" + vol.getName() + "/" + bucketName, "-removeAcl",
         "user:frodo:rw"};
     assertEquals(0, ToolRunner.run(shell, args));
 
     bucket = vol.getBucket(bucketName);
-    acl = bucket.getAcls().get(0);
-    assertEquals(1, bucket.getAcls().size());
+    acl = bucket.getAcls().get(aclSize);
+    assertEquals(1 + aclSize, bucket.getAcls().size());
     assertTrue(acl.getName().equals("samwise")
         && acl.getType() == OzoneACLType.GROUP
         && acl.getRights()== OzoneACLRights.READ);
 
     // test update bucket for a non-exist bucket
     args = new String[] {"-updateBucket",
-        url + "/" + vol.getVolumeName() + "/invalid-bucket", "-addAcl",
+        url + "/" + vol.getName() + "/invalid-bucket", "-addAcl",
         "user:frodo:rw"};
     assertEquals(1, ToolRunner.run(shell, args));
     assertTrue(err.toString().contains(
-        "Setting bucket property failed, error: BUCKET_NOT_FOUND"));
+        "Info Bucket failed, error: BUCKET_NOT_FOUND"));
   }
 
   @Test
   public void testListBucket() throws Exception {
+    LOG.info("Running testListBucket");
+    if (clientProtocol.equals(RestClient.class)) {
+      return;
+    }
     List<BucketInfo> buckets;
     String commandOutput;
     int bucketCount = 11;
@@ -478,13 +562,14 @@ public class TestOzoneShell {
     for (int i = 0; i < bucketCount; i++) {
       String name = "test-bucket" + i;
       bucketNames.add(name);
-      OzoneBucket bucket = vol.createBucket(name);
+      vol.createBucket(name);
+      OzoneBucket bucket = vol.getBucket(name);
       assertNotNull(bucket);
     }
 
     // test -length option
     String[] args = new String[] {"-listBucket",
-        url + "/" + vol.getVolumeName(), "-length", "100"};
+        url + "/" + vol.getName(), "-length", "100"};
     assertEquals(0, ToolRunner.run(shell, args));
     commandOutput = out.toString();
     buckets = (List<BucketInfo>) JsonUtils.toJsonList(commandOutput,
@@ -497,13 +582,13 @@ public class TestOzoneShell {
     // test-bucket10, test-bucket2, ,..., test-bucket9]
     for (int i = 0; i < buckets.size(); i++) {
       assertEquals(buckets.get(i).getBucketName(), bucketNames.get(i));
-      assertEquals(buckets.get(i).getVolumeName(), vol.getVolumeName());
+      assertEquals(buckets.get(i).getVolumeName(), vol.getName());
       assertTrue(buckets.get(i).getCreatedOn()
           .contains(OzoneConsts.OZONE_TIME_ZONE));
     }
 
     out.reset();
-    args = new String[] {"-listBucket", url + "/" + vol.getVolumeName(),
+    args = new String[] {"-listBucket", url + "/" + vol.getName(),
         "-length", "3"};
     assertEquals(0, ToolRunner.run(shell, args));
     commandOutput = out.toString();
@@ -519,7 +604,7 @@ public class TestOzoneShell {
 
     // test -prefix option
     out.reset();
-    args = new String[] {"-listBucket", url + "/" + vol.getVolumeName(),
+    args = new String[] {"-listBucket", url + "/" + vol.getName(),
         "-length", "100", "-prefix", "test-bucket1"};
     assertEquals(0, ToolRunner.run(shell, args));
     commandOutput = out.toString();
@@ -533,7 +618,7 @@ public class TestOzoneShell {
 
     // test -start option
     out.reset();
-    args = new String[] {"-listBucket", url + "/" + vol.getVolumeName(),
+    args = new String[] {"-listBucket", url + "/" + vol.getName(),
         "-length", "100", "-start", "test-bucket7"};
     assertEquals(0, ToolRunner.run(shell, args));
     commandOutput = out.toString();
@@ -546,7 +631,7 @@ public class TestOzoneShell {
 
     // test error conditions
     err.reset();
-    args = new String[] {"-listBucket", url + "/" + vol.getVolumeName(),
+    args = new String[] {"-listBucket", url + "/" + vol.getName(),
         "-length", "-1"};
     assertEquals(1, ToolRunner.run(shell, args));
     assertTrue(err.toString().contains(
@@ -555,9 +640,10 @@ public class TestOzoneShell {
 
   @Test
   public void testPutKey() throws Exception {
+    LOG.info("Running testPutKey");
     OzoneBucket bucket = creatBucket();
-    String volumeName = bucket.getBucketInfo().getVolumeName();
-    String bucketName = bucket.getBucketName();
+    String volumeName = bucket.getVolumeName();
+    String bucketName = bucket.getName();
     String keyName = "key" + RandomStringUtils.randomNumeric(5);
 
     String[] args = new String[] {"-putKey",
@@ -565,8 +651,8 @@ public class TestOzoneShell {
         createTmpFile()};
     assertEquals(0, ToolRunner.run(shell, args));
 
-    OzoneKey keyInfo = bucket.getKeyInfo(keyName);
-    assertEquals(keyName, keyInfo.getObjectInfo().getKeyName());
+    OzoneKey keyInfo = bucket.getKey(keyName);
+    assertEquals(keyName, keyInfo.getName());
 
     // test put key in a non-exist bucket
     args = new String[] {"-putKey",
@@ -574,18 +660,22 @@ public class TestOzoneShell {
         createTmpFile()};
     assertEquals(1, ToolRunner.run(shell, args));
     assertTrue(err.toString().contains(
-        "Create key failed, error:BUCKET_NOT_FOUND"));
+        "Info Bucket failed, error: BUCKET_NOT_FOUND"));
   }
 
   @Test
   public void testGetKey() throws Exception {
+    LOG.info("Running testGetKey");
     String keyName = "key" + RandomStringUtils.randomNumeric(5);
     OzoneBucket bucket = creatBucket();
-    String volumeName = bucket.getBucketInfo().getVolumeName();
-    String bucketName = bucket.getBucketName();
+    String volumeName = bucket.getVolumeName();
+    String bucketName = bucket.getName();
 
     String dataStr = "test-data";
-    bucket.putKey(keyName, dataStr);
+    OzoneOutputStream keyOutputStream =
+        bucket.createKey(keyName, dataStr.length());
+    keyOutputStream.write(dataStr.getBytes());
+    keyOutputStream.close();
 
     String tmpPath = baseDir.getAbsolutePath() + "/testfile-"
         + UUID.randomUUID().toString();
@@ -603,14 +693,19 @@ public class TestOzoneShell {
 
   @Test
   public void testDeleteKey() throws Exception {
+    LOG.info("Running testDeleteKey");
     String keyName = "key" + RandomStringUtils.randomNumeric(5);
     OzoneBucket bucket = creatBucket();
-    String volumeName = bucket.getBucketInfo().getVolumeName();
-    String bucketName = bucket.getBucketName();
-    bucket.putKey(keyName, "test-data");
+    String volumeName = bucket.getVolumeName();
+    String bucketName = bucket.getName();
+    String dataStr = "test-data";
+    OzoneOutputStream keyOutputStream =
+        bucket.createKey(keyName, dataStr.length());
+    keyOutputStream.write(dataStr.getBytes());
+    keyOutputStream.close();
 
-    OzoneKey keyInfo = bucket.getKeyInfo(keyName);
-    assertEquals(keyName, keyInfo.getObjectInfo().getKeyName());
+    OzoneKey keyInfo = bucket.getKey(keyName);
+    assertEquals(keyName, keyInfo.getName());
 
     String[] args = new String[] {"-deleteKey",
         url + "/" + volumeName + "/" + bucketName + "/" + keyName};
@@ -618,9 +713,9 @@ public class TestOzoneShell {
 
     // verify if key has been deleted in the bucket
     try {
-      bucket.getKeyInfo(keyName);
+      bucket.getKey(keyName);
       fail("Get key should have thrown.");
-    } catch (OzoneException e) {
+    } catch (IOException e) {
       GenericTestUtils.assertExceptionContains(
           "Lookup key failed, error:KEY_NOT_FOUND", e);
     }
@@ -643,22 +738,29 @@ public class TestOzoneShell {
 
   @Test
   public void testInfoKey() throws Exception {
+    LOG.info("Running testInfoKey");
     String keyName = "key" + RandomStringUtils.randomNumeric(5);
     OzoneBucket bucket = creatBucket();
-    String volumeName = bucket.getBucketInfo().getVolumeName();
-    String bucketName = bucket.getBucketName();
-    bucket.putKey(keyName, "test-data");
+    String volumeName = bucket.getVolumeName();
+    String bucketName = bucket.getName();
+    String dataStr = "test-data";
+    OzoneOutputStream keyOutputStream =
+        bucket.createKey(keyName, dataStr.length());
+    keyOutputStream.write(dataStr.getBytes());
+    keyOutputStream.close();
 
     String[] args = new String[] {"-infoKey",
         url + "/" + volumeName + "/" + bucketName + "/" + keyName};
 
     // verify the response output
-    assertEquals(0, ToolRunner.run(shell, args));
-
+    int a = ToolRunner.run(shell, args);
     String output = out.toString();
+    assertEquals(0, a);
+
     assertTrue(output.contains(keyName));
-    assertTrue(output.contains("createdOn") && output.contains("modifiedOn")
-        && output.contains(OzoneConsts.OZONE_TIME_ZONE));
+    assertTrue(
+        output.contains("createdOn") && output.contains("modifiedOn") && output
+            .contains(OzoneConsts.OZONE_TIME_ZONE));
 
     // reset stream
     out.reset();
@@ -677,19 +779,27 @@ public class TestOzoneShell {
 
   @Test
   public void testListKey() throws Exception {
+    LOG.info("Running testListKey");
+    if (clientProtocol.equals(RestClient.class)) {
+      return;
+    }
     String commandOutput;
     List<KeyInfo> keys;
     int keyCount = 11;
     OzoneBucket bucket = creatBucket();
-    String volumeName = bucket.getBucketInfo().getVolumeName();
-    String bucketName = bucket.getBucketName();
+    String volumeName = bucket.getVolumeName();
+    String bucketName = bucket.getName();
 
     String keyName;
     List<String> keyNames = new ArrayList<>();
     for (int i = 0; i < keyCount; i++) {
       keyName = "test-key" + i;
       keyNames.add(keyName);
-      bucket.putKey(keyName, "test-data" + i);
+      String dataStr = "test-data";
+      OzoneOutputStream keyOutputStream =
+          bucket.createKey(keyName, dataStr.length());
+      keyOutputStream.write(dataStr.getBytes());
+      keyOutputStream.close();
     }
 
     // test -length option
@@ -763,17 +873,23 @@ public class TestOzoneShell {
         "the vaule should be a positive number"));
   }
 
-  private OzoneVolume creatVolume() throws OzoneException {
-    String volumeName = UUID.randomUUID().toString() + "volume";
-    OzoneVolume vol = client.createVolume(volumeName, "bilbo", "100TB");
+  private OzoneVolume creatVolume() throws OzoneException, IOException {
+    String volumeName = RandomStringUtils.randomNumeric(5) + "volume";
+    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+        .setOwner("bilbo")
+        .setQuota("100TB")
+        .build();
+    client.createVolume(volumeName, volumeArgs);
+    OzoneVolume volume = client.getVolumeDetails(volumeName);
 
-    return vol;
+    return volume;
   }
 
-  private OzoneBucket creatBucket() throws OzoneException {
+  private OzoneBucket creatBucket() throws OzoneException, IOException {
     OzoneVolume vol = creatVolume();
-    String bucketName = UUID.randomUUID().toString() + "bucket";
-    OzoneBucket bucketInfo = vol.createBucket(bucketName);
+    String bucketName = RandomStringUtils.randomNumeric(5) + "bucket";
+    vol.createBucket(bucketName);
+    OzoneBucket bucketInfo = vol.getBucket(bucketName);
 
     return bucketInfo;
   }
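
The conversion above runs every shell test once per client implementation via JUnit 4's Parameterized runner. A self-contained sketch of the same pattern; the class, method, and parameter names here are illustrative (TestOzoneShell itself parameterizes on RpcClient.class and RestClient.class):

    import java.util.Arrays;
    import java.util.Collection;

    import static org.junit.Assert.assertTrue;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    @RunWith(Parameterized.class)
    public class ProtocolMatrixTest {
      @Parameterized.Parameters
      public static Collection<Object[]> protocols() {
        // One entry per run of the whole test class.
        return Arrays.asList(new Object[][] {{"rpc"}, {"rest"}});
      }

      @Parameterized.Parameter
      public String protocol; // injected by the runner before each test

      @Test
      public void runsOncePerParameter() {
        assertTrue(protocol.equals("rpc") || protocol.equals("rest"));
      }
    }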

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
index 46539e7..64e5f71 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
@@ -132,7 +133,7 @@ public class TestBuckets {
       fail("Except the bucket creation to be failed because the"
           + " bucket name starts with an invalid char #");
     } catch (Exception e) {
-      assertTrue(e instanceof OzoneRestClientException);
+      assertTrue(e instanceof OzoneClientException);
       assertTrue(e.getMessage().contains("Bucket or Volume name"
           + " has an unsupported character : #"));
     }
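
The try/fail()/catch shape above is the stock JUnit 4 idiom for asserting both an exception's type and its message. A standalone sketch, assuming the usual static imports of org.junit.Assert.assertTrue and fail; validateResourceName() is a hypothetical stand-in for the bucket-creation call:

    // validateResourceName() is hypothetical; the message text matches the
    // assertion in the test above.
    try {
      validateResourceName("#invalid-bucket");
      fail("Expected validation to reject the leading '#'");
    } catch (Exception e) {
      assertTrue(e instanceof OzoneClientException);
      assertTrue(e.getMessage().contains(
          "Bucket or Volume name has an unsupported character : #"));
    }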

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
index 4cd90c9..2d3cea9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.Status;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
@@ -141,7 +142,7 @@ public class TestVolume {
       fail("Except the volume creation be failed because the"
           + " volume name starts with an invalid char #");
     } catch (Exception e) {
-      assertTrue(e instanceof OzoneRestClientException);
+      assertTrue(e instanceof OzoneClientException);
       assertTrue(e.getMessage().contains("Bucket or Volume name"
           + " has an unsupported character : #"));
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java
index 99020c8..7fe6bb8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java
@@ -19,28 +19,30 @@
 package org.apache.hadoop.ozone.web.ozShell;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.ozone.web.client.OzoneRestClient;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.http.client.utils.URIBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_HTTP_SCHEME;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
+
 /**
  * Common interface for command handling.
  */
 public abstract class Handler {
 
-  protected OzoneRestClient client;
-
-  /**
-   * Constructs a client object.
-   */
-  public Handler() {
-    client = new OzoneRestClient();
-  }
+  protected static final Logger LOG = LoggerFactory.getLogger(Handler.class);
+  protected OzoneClient client;
 
   /**
    * Executes the Client command.
@@ -61,19 +63,44 @@ public abstract class Handler {
    * @throws URISyntaxException
    * @throws OzoneException
    */
-  protected URI verifyURI(String uri) throws URISyntaxException,
-      OzoneException {
+  protected URI verifyURI(String uri)
+      throws URISyntaxException, OzoneException, IOException {
     if ((uri == null) || uri.isEmpty()) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Ozone URI is needed to execute this command.");
     }
     URIBuilder ozoneURI = new URIBuilder(uri);
-
     if (ozoneURI.getPort() == 0) {
       ozoneURI.setPort(Shell.DEFAULT_OZONE_PORT);
     }
+
+    Configuration conf = new OzoneConfiguration();
+    String scheme = ozoneURI.getScheme();
+    if (scheme.equals(OZONE_HTTP_SCHEME)) {
+      if (ozoneURI.getHost() != null) {
+        if (ozoneURI.getPort() == -1) {
+          client = OzoneClientFactory.getRestClient(ozoneURI.getHost());
+        } else {
+          client = OzoneClientFactory
+              .getRestClient(ozoneURI.getHost(), ozoneURI.getPort(), conf);
+        }
+      } else {
+        client = OzoneClientFactory.getRestClient(conf);
+      }
+    } else if (scheme.equals(OZONE_URI_SCHEME) || scheme.isEmpty()) {
+      if (ozoneURI.getHost() != null) {
+        if (ozoneURI.getPort() == -1) {
+          client = OzoneClientFactory.getRpcClient(ozoneURI.getHost());
+        } else {
+          client = OzoneClientFactory
+              .getRpcClient(ozoneURI.getHost(), ozoneURI.getPort(), conf);
+        }
+      } else {
+        client = OzoneClientFactory.getRpcClient(conf);
+      }
+    } else {
+      throw new OzoneClientException("Invalid URI: " + ozoneURI);
+    }
     return ozoneURI.build();
   }
-
-
 }
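
The new verifyURI() above doubles as a client factory: http:// URIs get a REST client, while o3:// (or an empty scheme) gets an RPC client. A condensed sketch of that dispatch using only the factory calls visible in the hunk, inside a method that declares throws URISyntaxException and IOException; the host and port are illustrative, and the no-host fallbacks to getRestClient(conf)/getRpcClient(conf) are omitted:

    OzoneConfiguration conf = new OzoneConfiguration();
    URIBuilder uri = new URIBuilder("o3://ksm.example.com:9862");
    OzoneClient client;
    if ("http".equals(uri.getScheme())) {        // OZONE_HTTP_SCHEME
      client = OzoneClientFactory.getRestClient(uri.getHost(),
          uri.getPort(), conf);
    } else {                                     // OZONE_URI_SCHEME, "o3"
      client = OzoneClientFactory.getRpcClient(uri.getHost(),
          uri.getPort(), conf);
    }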

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java
index d1c46b5..0788f9e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java
@@ -18,9 +18,10 @@
 package org.apache.hadoop.ozone.web.ozShell.bucket;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.ozone.web.client.OzoneBucket;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
-import org.apache.hadoop.ozone.web.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -39,7 +40,6 @@ public class CreateBucketHandler extends Handler {
 
   private String volumeName;
   private String bucketName;
-  private String rootName;
 
   /**
    * Executes create bucket.
@@ -54,7 +54,7 @@ public class CreateBucketHandler extends Handler {
   protected void execute(CommandLine cmd)
       throws IOException, OzoneException, URISyntaxException {
     if (!cmd.hasOption(Shell.CREATE_BUCKET)) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Incorrect call : createBucket is missing");
     }
 
@@ -62,7 +62,7 @@ public class CreateBucketHandler extends Handler {
     URI ozoneURI = verifyURI(ozoneURIString);
     Path path = Paths.get(ozoneURI.getPath());
     if (path.getNameCount() < 2) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "volume and bucket name required in createBucket");
     }
 
@@ -74,23 +74,13 @@ public class CreateBucketHandler extends Handler {
       System.out.printf("Bucket Name : %s%n", bucketName);
     }
 
-    if (cmd.hasOption(Shell.RUNAS)) {
-      rootName = "hdfs";
-    } else {
-      rootName = System.getProperty("user.name");
-    }
-
-
-    client.setEndPointURI(ozoneURI);
-    client.setUserAuth(rootName);
-
-
-    OzoneVolume vol = client.getVolume(volumeName);
-    OzoneBucket bucket = vol.createBucket(bucketName);
+    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
+    vol.createBucket(bucketName);
 
     if (cmd.hasOption(Shell.VERBOSE)) {
-      System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter(
-          bucket.getBucketInfo().toJsonString()));
+      OzoneBucket bucket = vol.getBucket(bucketName);
+      System.out.printf(JsonUtils.toJsonStringWithDefaultPrettyPrinter(
+          JsonUtils.toJsonString(OzoneClientUtils.asBucketInfo(bucket))));
     }
   }
 }
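
The handler derives the volume and bucket names from the URI path with java.nio. Only the getNameCount() guard is visible in the hunk, so the getName(0)/getName(1) reads below are presumed from the elided part of the file; the sample path stands in for ozoneURI.getPath():

    import java.nio.file.Path;
    import java.nio.file.Paths;

    Path path = Paths.get("/vol-example/bucket-example");
    if (path.getNameCount() < 2) {
      throw new IllegalArgumentException(
          "volume and bucket name required in createBucket");
    }
    String volumeName = path.getName(0).toString();
    String bucketName = path.getName(1).toString();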




[10/50] [abbrv] hadoop git commit: HADOOP-15454. TestRollingFileSystemSinkWithLocal fails on Windows. Contributed by Xiao Liang.

Posted by xy...@apache.org.
HADOOP-15454. TestRollingFileSystemSinkWithLocal fails on Windows. Contributed by Xiao Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b9e145e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b9e145e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b9e145e

Branch: refs/heads/HDDS-4
Commit: 9b9e145e2d40c37623a6c1921627218ee69ff508
Parents: c8fa7cb
Author: Inigo Goiri <in...@apache.org>
Authored: Thu May 10 09:41:16 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:08 2018 -0700

----------------------------------------------------------------------
 .../sink/TestRollingFileSystemSinkWithLocal.java | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b9e145e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithLocal.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithLocal.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithLocal.java
index 96306bf..1a69c8d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithLocal.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithLocal.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.metrics2.sink;
 
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.metrics2.MetricsSystem;
 
 import org.junit.Test;
@@ -36,7 +37,7 @@ public class TestRollingFileSystemSinkWithLocal
    */
   @Test
   public void testWrite() throws Exception {
-    String path = methodDir.getAbsolutePath();
+    String path = methodDir.toURI().toString();
     MetricsSystem ms = initMetricsSystem(path, false, false);
 
     assertMetricsContents(doWriteTest(ms, path, 1));
@@ -49,7 +50,7 @@ public class TestRollingFileSystemSinkWithLocal
    */
   @Test
   public void testSilentWrite() throws Exception {
-    String path = methodDir.getAbsolutePath();
+    String path = methodDir.toURI().toString();
     MetricsSystem ms = initMetricsSystem(path, true, false);
 
     assertMetricsContents(doWriteTest(ms, path, 1));
@@ -62,7 +63,7 @@ public class TestRollingFileSystemSinkWithLocal
    */
   @Test
   public void testExistingWrite() throws Exception {
-    String path = methodDir.getAbsolutePath();
+    String path = methodDir.toURI().toString();
 
     assertMetricsContents(doAppendTest(path, false, false, 2));
   }
@@ -75,7 +76,7 @@ public class TestRollingFileSystemSinkWithLocal
    */
   @Test
   public void testExistingWrite2() throws Exception {
-    String path = methodDir.getAbsolutePath();
+    String path = methodDir.toURI().toString();
     MetricsSystem ms = initMetricsSystem(path, false, false);
 
     preCreateLogFile(path, 2);
@@ -91,7 +92,7 @@ public class TestRollingFileSystemSinkWithLocal
    */
   @Test
   public void testSilentExistingWrite() throws Exception {
-    String path = methodDir.getAbsolutePath();
+    String path = methodDir.toURI().toString();
 
     assertMetricsContents(doAppendTest(path, false, false, 2));
   }
@@ -101,12 +102,12 @@ public class TestRollingFileSystemSinkWithLocal
    */
   @Test
   public void testFailedWrite() {
-    String path = methodDir.getAbsolutePath();
+    String path = methodDir.toURI().toString();
     MetricsSystem ms = initMetricsSystem(path, false, false);
 
     new MyMetrics1().registerWith(ms);
 
-    methodDir.setWritable(false);
+    assertTrue(FileUtil.setWritable(methodDir, false));
     MockSink.errored = false;
 
     try {
@@ -130,12 +131,12 @@ public class TestRollingFileSystemSinkWithLocal
    */
   @Test
   public void testSilentFailedWrite() {
-    String path = methodDir.getAbsolutePath();
+    String path = methodDir.toURI().toString();
     MetricsSystem ms = initMetricsSystem(path, true, false);
 
     new MyMetrics1().registerWith(ms);
 
-    methodDir.setWritable(false);
+    assertTrue(FileUtil.setWritable(methodDir, false));
     MockSink.errored = false;
 
     try {


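A note on the two changes above: the path switch matters because the sink hands this string to Hadoop's FileSystem resolution, where a scheme-qualified file: URI is unambiguous across platforms, while a bare native path is not; and FileUtil.setWritable is asserted on presumably because java.io.File#setWritable is unreliable on Windows. A minimal sketch of the path-vs-URI distinction, using only standard java.io (the directory name is illustrative):

import java.io.File;

public class PathVsUriDemo {
  public static void main(String[] args) {
    File methodDir = new File("target/test-metrics");   // hypothetical test directory
    // Platform-native path: separators and drive letters vary by OS.
    System.out.println(methodDir.getAbsolutePath());    // e.g. C:\work\target\test-metrics
    // file: URI: scheme-qualified and portable, which is the form the
    // rolling sink's path handling can resolve unambiguously.
    System.out.println(methodDir.toURI().toString());   // e.g. file:/C:/work/target/test-metrics
  }
}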


[18/50] [abbrv] hadoop git commit: HADOOP-15441. Log kms url and token service at debug level. Contributed by Gabor Bota

Posted by xy...@apache.org.
HADOOP-15441. Log kms url and token service at debug level. Contributed by Gabor Bota


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca35c2ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca35c2ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca35c2ed

Branch: refs/heads/HDDS-4
Commit: ca35c2ed6550d8e0e17ebec7a171204f140d4626
Parents: 992eea5
Author: Rushabh Shah <sh...@apache.org>
Authored: Sat May 12 12:19:13 2018 -0500
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca35c2ed/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index dddd358..08787a5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -402,7 +402,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
                     KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
             new EncryptedQueueRefiller());
     authToken = new DelegationTokenAuthenticatedURL.Token();
-    LOG.info("KMSClientProvider for KMS url: {} delegation token service: {}" +
+    LOG.debug("KMSClientProvider for KMS url: {} delegation token service: {}" +
         " created.", kmsUrl, dtService);
   }
 


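For context on the one-line change above: with SLF4J's parameterized messages, the final string is only assembled when the target level is enabled, so demoting this construction-time message from info to debug also removes its formatting cost from the common path. A self-contained sketch (logger name and values are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class KmsLogLevelDemo {
  private static final Logger LOG = LoggerFactory.getLogger(KmsLogLevelDemo.class);

  public static void main(String[] args) {
    String kmsUrl = "https://kms.example.com:9600/kms/v1/";      // hypothetical
    String dtService = "kms://https@kms.example.com:9600/kms";   // hypothetical
    // The {} placeholders are substituted only if DEBUG is enabled for this
    // logger; otherwise the call returns without building the message.
    LOG.debug("KMSClientProvider for KMS url: {} delegation token service: {}"
        + " created.", kmsUrl, dtService);
  }
}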


[26/50] [abbrv] hadoop git commit: HDDS-39. Ozone: Compile Ozone/HDFS/Cblock protobuf files with proto3 compiler using maven protoc plugin. Contributed by Mukul Kumar Singh.

Posted by xy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index b621a08..57d4287 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
index 2da6874..a5d268d 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.ozone.genesis;
 
-import com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.RandomStringUtils;
@@ -53,15 +53,22 @@ import java.util.concurrent.atomic.AtomicInteger;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX;
 
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.CreateContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ReadChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.PutKeyRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.GetKeyRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
-
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .CreateContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ReadChunkRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .WriteChunkRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .PutKeyRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .GetKeyRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerData;
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 793ffb4..862a693 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -104,6 +104,11 @@
     <mssql.version>6.2.1.jre7</mssql.version>
     <okhttp.version>2.7.5</okhttp.version>
 
+    <!-- Maven protoc compiler -->
+    <protobuf-maven-plugin.version>0.5.1</protobuf-maven-plugin.version>
+    <protobuf-compile.version>3.1.0</protobuf-compile.version>
+    <os-maven-plugin.version>1.5.0.Final</os-maven-plugin.version>
+
     <!-- define the Java language version used by the compiler -->
     <javac.version>1.8</javac.version>
 
@@ -413,7 +418,7 @@
         <version>${hadoop.version}</version>
       </dependency>
 
-     <dependency>
+      <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-applications-distributedshell</artifactId>
         <version>${hadoop.version}</version>
@@ -1737,8 +1742,8 @@
           </ignores>
         </configuration>
       </plugin>
-       <plugin>
-         <groupId>org.apache.maven.plugins</groupId>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-antrun-plugin</artifactId>
         <executions>
           <execution>


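The import changes above are more than cosmetic: the benchmark now compiles against the protobuf classes that Ratis relocates (shades) under its own package, so they can coexist with the unshaded protobuf that the rest of Hadoop still uses. A small sketch of the consequence, assuming the ratis-shaded artifact is on the classpath (the payload is illustrative):

// Same API as com.google.protobuf.ByteString, but a distinct class.
import org.apache.ratis.shaded.com.google.protobuf.ByteString;

public class ShadedByteStringDemo {
  public static void main(String[] args) {
    ByteString chunk = ByteString.copyFromUtf8("chunk-payload");  // hypothetical payload
    // A shaded ByteString is not assignable to com.google.protobuf.ByteString,
    // which is exactly what keeps the two protobuf versions from clashing.
    System.out.println(chunk.size());  // prints 13
  }
}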


[21/50] [abbrv] hadoop git commit: HDDS-21. Add support for rename key within a bucket for rest client. Contributed by Lokesh Jain.

Posted by xy...@apache.org.
HDDS-21. Add support for rename key within a bucket for rest client. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a66072d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a66072d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a66072d2

Branch: refs/heads/HDDS-4
Commit: a66072d2a6073cb0608200a03ea69d569cbd7a29
Parents: ce2c1b0
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Sat May 12 17:40:29 2018 +0530
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../hadoop/ozone/client/rest/RestClient.java    | 16 ++++++-
 .../ozone/client/rest/headers/Header.java       |  2 +
 .../ozone/client/rest/TestOzoneRestClient.java  | 30 +++++++++++++
 .../hadoop/ozone/web/handlers/KeyHandler.java   | 45 ++++++++++++++++++++
 .../hadoop/ozone/web/interfaces/Keys.java       | 31 ++++++++++++++
 5 files changed, 123 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a66072d2/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
index 1fd2091..ac71abe 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
@@ -678,7 +678,21 @@ public class RestClient implements ClientProtocol {
   @Override
   public void renameKey(String volumeName, String bucketName,
       String fromKeyName, String toKeyName) throws IOException {
-    throw new UnsupportedOperationException("Not yet implemented.");
+    try {
+      Preconditions.checkNotNull(volumeName);
+      Preconditions.checkNotNull(bucketName);
+      Preconditions.checkNotNull(fromKeyName);
+      Preconditions.checkNotNull(toKeyName);
+      URIBuilder builder = new URIBuilder(ozoneRestUri);
+      builder.setPath(PATH_SEPARATOR + volumeName + PATH_SEPARATOR + bucketName
+          + PATH_SEPARATOR + fromKeyName);
+      builder.addParameter(Header.OZONE_RENAME_TO_KEY_PARAM_NAME, toKeyName);
+      HttpPost httpPost = new HttpPost(builder.build());
+      addOzoneHeaders(httpPost);
+      EntityUtils.consume(executeHttpRequest(httpPost));
+    } catch (URISyntaxException e) {
+      throw new IOException(e);
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a66072d2/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java
index 00d4857..ebfc0a9 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java
@@ -65,6 +65,8 @@ public final class Header {
   public static final String OZONE_LIST_QUERY_PREVKEY="prev-key";
   public static final String OZONE_LIST_QUERY_ROOTSCAN="root-scan";
 
+  public static final String OZONE_RENAME_TO_KEY_PARAM_NAME = "toKey";
+
   private Header() {
     // Never constructed.
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a66072d2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
index a94ee6c..9918d63 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
@@ -389,6 +389,36 @@ public class TestOzoneRestClient {
     bucket.getKey(keyName);
   }
 
+  @Test
+  public void testRenameKey()
+      throws IOException, OzoneException {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String fromKeyName = UUID.randomUUID().toString();
+    String value = "sample value";
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    OzoneOutputStream out = bucket.createKey(fromKeyName,
+        value.getBytes().length, ReplicationType.STAND_ALONE,
+        ReplicationFactor.ONE);
+    out.write(value.getBytes());
+    out.close();
+    OzoneKey key = bucket.getKey(fromKeyName);
+    Assert.assertEquals(fromKeyName, key.getName());
+
+    String toKeyName = UUID.randomUUID().toString();
+    bucket.renameKey(fromKeyName, toKeyName);
+
+    key = bucket.getKey(toKeyName);
+    Assert.assertEquals(toKeyName, key.getName());
+
+    // Lookup for old key should fail.
+    thrown.expectMessage("Lookup key failed, error");
+    bucket.getKey(fromKeyName);
+  }
+
   /**
    * Close OzoneClient and shutdown MiniDFSCluster.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a66072d2/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java
index d4c5a79..8c0b103 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java
@@ -242,4 +242,49 @@ public class KeyHandler implements Keys {
       }
     }.handleCall(volume, bucket, keys, req, headers, info, null);
   }
+
+  /**
+   * Renames an existing key within a bucket.
+   *
+   * @param volume      Storage Volume Name
+   * @param bucket      Name of the bucket
+   * @param key         Name of the Object
+   * @param toKeyName   New name of the Object
+   * @param req         http Request
+   * @param info        UriInfo
+   * @param headers     HttpHeaders
+   * @return Response
+   * @throws OzoneException
+   */
+  @Override
+  public Response renameKey(String volume, String bucket, String key,
+      String toKeyName, Request req, UriInfo info, HttpHeaders headers)
+      throws OzoneException {
+    return new KeyProcessTemplate() {
+      /**
+       * Abstract function that gets implemented in the KeyHandler functions.
+       * This function will just deal with the core file system related logic
+       * and will rely on handleCall function for repetitive error checks
+       *
+       * @param args - parsed bucket args, name, userName, ACLs etc
+       * @param input - The body as an Input Stream
+       * @param request - Http request
+       * @param headers - Parsed http Headers.
+       * @param info - UriInfo
+       *
+       * @return Response
+       *
+       * @throws IOException - From the file system operations
+       */
+      @Override
+      public Response doProcess(KeyArgs args, InputStream input,
+          Request request, HttpHeaders headers,
+          UriInfo info)
+          throws IOException, OzoneException, NoSuchAlgorithmException {
+        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
+        fs.renameKey(args, toKeyName);
+        return OzoneRestUtils.getResponse(args, HTTP_OK, "");
+      }
+    }.handleCall(volume, bucket, key, req, headers, info, null);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a66072d2/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java
index f9255f2..1ce81c2 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java
@@ -29,6 +29,7 @@ import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
 import javax.ws.rs.GET;
 import javax.ws.rs.PUT;
+import javax.ws.rs.POST;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
 import javax.ws.rs.QueryParam;
@@ -142,5 +143,35 @@ public interface Keys {
       @PathParam("bucket") String bucket, @PathParam("keys") String keys,
       @Context Request req, @Context UriInfo info, @Context HttpHeaders headers)
       throws OzoneException;
+
+  /**
+   * Renames an existing key within a bucket.
+   *
+   * @param volume Storage Volume Name
+   * @param bucket Name of the bucket
+   * @param keys Name of the Object
+   * @param toKeyName New name of the Object
+   * @param req http Request
+   * @param info UriInfo
+   * @param headers HttpHeaders
+   * @return Response
+   * @throws OzoneException
+   */
+  @POST
+  @ApiOperation("Renames an existing key within a bucket")
+  @ApiImplicitParams({
+      @ApiImplicitParam(name = "x-ozone-version", example = "v1", required =
+          true, paramType = "header"),
+      @ApiImplicitParam(name = "x-ozone-user", example = "user", required =
+          true, paramType = "header"),
+      @ApiImplicitParam(name = "Date", example = "Date: Mon, 26 Jun 2017 "
+          + "04:23:30 GMT", required = true, paramType = "header"),
+      @ApiImplicitParam(name = "Authorization", example = "OZONE", required =
+          true, paramType = "header")})
+  Response renameKey(@PathParam("volume") String volume,
+      @PathParam("bucket") String bucket, @PathParam("keys") String keys,
+      @QueryParam(Header.OZONE_RENAME_TO_KEY_PARAM_NAME) String toKeyName,
+      @Context Request req, @Context UriInfo info, @Context HttpHeaders headers)
+      throws OzoneException;
 }
 


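Putting the pieces above together, a client-side rename is now a single POST carrying the new toKey query parameter. A usage sketch against a running cluster, assuming the OzoneClientFactory REST entry point of this era (volume, bucket, and key names are illustrative):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneVolume;

public class RenameKeyExample {
  public static void main(String[] args) throws Exception {
    OzoneClient client = OzoneClientFactory.getRestClient(new OzoneConfiguration());
    ObjectStore store = client.getObjectStore();
    OzoneVolume volume = store.getVolume("vol1");      // must already exist
    OzoneBucket bucket = volume.getBucket("bucket1");  // must already exist
    // RestClient turns this into: POST /vol1/bucket1/old-key?toKey=new-key
    bucket.renameKey("old-key", "new-key");
    System.out.println(bucket.getKey("new-key").getName());  // prints new-key
    client.close();
  }
}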


[22/50] [abbrv] hadoop git commit: HDDS-43: Rename hdsl to hdds in hadoop-ozone/acceptance-test/README.md. Contributed by Sandeep Nemuri

Posted by xy...@apache.org.
HDDS-43: Rename hdsl to hdds in hadoop-ozone/acceptance-test/README.md. Contributed by Sandeep Nemuri


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9a1ed71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9a1ed71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9a1ed71

Branch: refs/heads/HDDS-4
Commit: e9a1ed718e5473b14bd762ba77afc3570d882a32
Parents: 3653016
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Thu May 10 17:24:40 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 hadoop-ozone/acceptance-test/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9a1ed71/hadoop-ozone/acceptance-test/README.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/README.md b/hadoop-ozone/acceptance-test/README.md
index 07d10fb..3a0ca49 100644
--- a/hadoop-ozone/acceptance-test/README.md
+++ b/hadoop-ozone/acceptance-test/README.md
@@ -12,7 +12,7 @@
   limitations under the License. See accompanying LICENSE file.
 -->
 
-# Acceptance test suite for Ozone/Hdsl
+# Acceptance test suite for Ozone/Hdds
 
 This project contains acceptance tests for ozone/hdds using docker-compose and [robot framework](http://robotframework.org/).
 
@@ -20,7 +20,7 @@ This project contains acceptance tests for ozone/hdds using docker-compose and [
 
 To run the acceptance tests, please activate the `ozone-acceptance-test` profile and do a full build.
 
-Typically you need a `mvn install -Phdsl,ozone-acceptance-test,dist -DskipTests` for a build without unit tests but with acceptance test.
+Typically you need a `mvn install -Phdds,ozone-acceptance-test,dist -DskipTests` for a build without unit tests but with acceptance test.
 
 Notes:
 




[34/50] [abbrv] hadoop git commit: HDDS-40. Separating packaging of Ozone/HDDS from the main Hadoop. Contributed by Elek, Marton.

Posted by xy...@apache.org.
HDDS-40. Separating packaging of Ozone/HDDS from the main Hadoop.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51f44b85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51f44b85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51f44b85

Branch: refs/heads/HDDS-4
Commit: 51f44b85839579b6dd8d75fff070b1e8026b30c0
Parents: ce914a4
Author: Anu Engineer <ae...@apache.org>
Authored: Fri May 11 13:52:05 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .gitignore                                      |   5 +
 dev-support/bin/dist-layout-stitching           |  15 --
 dev-support/bin/ozone-dist-layout-stitching     | 153 +++++++++++++++++++
 dev-support/bin/ozone-dist-tar-stitching        |  48 ++++++
 hadoop-dist/pom.xml                             | 152 +++++++++++-------
 hadoop-dist/src/main/compose/ozone/.env         |   2 +-
 .../src/main/compose/ozone/docker-compose.yaml  |   8 +-
 .../hdfs/server/namenode/NameNodeUtils.java     |   2 +-
 hadoop-ozone/acceptance-test/README.md          |  22 ++-
 .../dev-support/bin/robot-all.sh                |  18 +++
 .../acceptance-test/dev-support/bin/robot.sh    |  38 +++++
 hadoop-ozone/acceptance-test/pom.xml            |  29 +---
 .../acceptance-test/src/test/compose/.env       |   2 +-
 .../src/test/compose/docker-compose.yaml        |   8 +-
 .../test/robotframework/acceptance/ozone.robot  |   7 +-
 15 files changed, 394 insertions(+), 115 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 440708a..3883ce2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -48,3 +48,8 @@ patchprocess/
 .history/
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package-lock.json
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn-error.log
+
+#robotframework outputs
+log.html
+output.xml
+report.html

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/dev-support/bin/dist-layout-stitching
----------------------------------------------------------------------
diff --git a/dev-support/bin/dist-layout-stitching b/dev-support/bin/dist-layout-stitching
index 6557161..584821a 100755
--- a/dev-support/bin/dist-layout-stitching
+++ b/dev-support/bin/dist-layout-stitching
@@ -146,21 +146,6 @@ run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-api/target/hadoop-client-
 run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-runtime/target/hadoop-client-runtime-${VERSION}.jar" share/hadoop/client/
 run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-minicluster/target/hadoop-client-minicluster-${VERSION}.jar" share/hadoop/client/
 
-# HDDS
-run copy "${ROOT}/hadoop-hdds/common/target/hadoop-hdds-common-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-hdds/framework/target/hadoop-hdds-server-framework-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-hdds/server-scm/target/hadoop-hdds-server-scm-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-hdds/container-service/target/hadoop-hdds-container-service-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-hdds/client/target/hadoop-hdds-client-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-hdds/tools/target/hadoop-hdds-tools-${HDDS_VERSION}" .
-
-# Ozone
-run copy "${ROOT}/hadoop-ozone/common/target/hadoop-ozone-common-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" .
-
 run copy "${ROOT}/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${VERSION}" .
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/dev-support/bin/ozone-dist-layout-stitching
----------------------------------------------------------------------
diff --git a/dev-support/bin/ozone-dist-layout-stitching b/dev-support/bin/ozone-dist-layout-stitching
new file mode 100755
index 0000000..1b0b224
--- /dev/null
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -0,0 +1,153 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# project.version
+VERSION=$1
+
+# project.build.directory
+BASEDIR=$2
+
+#hdds.version
+HDDS_VERSION=$3
+
+function run()
+{
+  declare res
+
+  echo "\$ ${*}"
+  "${@}"
+  res=$?
+  if [[ ${res} != 0 ]]; then
+    echo
+    echo "Failed!"
+    echo
+    exit "${res}"
+  fi
+}
+
+function findfileindir()
+{
+  declare file="$1"
+  declare dir="${2:-./share}"
+  declare count
+
+  count=$(find "${dir}" -iname "${file}" | wc -l)
+
+  #shellcheck disable=SC2086
+  echo ${count}
+}
+
+function copyifnotexists()
+{
+  declare src="$1"
+  declare dest="$2"
+
+  declare srcname
+  declare destdir
+
+  declare child
+  declare childpath
+
+  if [[ -f "${src}" ]]; then
+    srcname=${src##*/}
+    if [[ "${srcname}" != *.jar ||
+          $(findfileindir "${srcname}") -eq "0" ]]; then
+      destdir=$(dirname "${dest}")
+      mkdir -p "${destdir}"
+      cp -p "${src}" "${dest}"
+    fi
+  else
+    for childpath in "${src}"/*; do
+      child="${childpath##*/}"
+      if [[ "${child}" == "doc" ||
+            "${child}" == "webapps" ]]; then
+        mkdir -p "${dest}/${child}"
+        cp -r "${src}/${child}"/* "${dest}/${child}"
+        continue;
+      fi
+      copyifnotexists "${src}/${child}" "${dest}/${child}"
+    done
+  fi
+}
+
+#Copy all contents as-is, except the libs.
+#For libs, copy only those jars that do not already exist in the share directory.
+function copy()
+{
+  declare src="$1"
+  declare dest="$2"
+
+  declare child
+  declare childpath
+
+  if [[ -d "${src}" ]]; then
+    for childpath in "${src}"/*; do
+      child="${childpath##*/}"
+
+      if [[ "${child}" == "share" ]]; then
+        copyifnotexists "${src}/${child}" "${dest}/${child}"
+      else
+        if [[ -d "${src}/${child}" ]]; then
+          mkdir -p "${dest}/${child}"
+          cp -pr "${src}/${child}"/* "${dest}/${child}"
+        else
+          cp -pr "${src}/${child}" "${dest}/${child}"
+        fi
+      fi
+    done
+  fi
+}
+
+# shellcheck disable=SC2164
+ROOT=$(cd "${BASEDIR}"/../..;pwd)
+echo
+echo "Current directory $(pwd)"
+echo
+run rm -rf "ozone"
+run mkdir "ozone"
+run cd "ozone"
+run cp -p "${ROOT}/LICENSE.txt" .
+run cp -p "${ROOT}/NOTICE.txt" .
+run cp -p "${ROOT}/README.txt" .
+
+# Copy hadoop-common first so that it always has all of its dependencies.
+# Remaining projects will copy only libraries which are not already present in the 'share' directory.
+run copy "${ROOT}/hadoop-common-project/hadoop-common/target/hadoop-common-${VERSION}" .
+run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${VERSION}" .
+run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-${VERSION}" .
+
+
+# HDDS
+run copy "${ROOT}/hadoop-hdds/common/target/hadoop-hdds-common-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/framework/target/hadoop-hdds-server-framework-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/server-scm/target/hadoop-hdds-server-scm-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/container-service/target/hadoop-hdds-container-service-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/client/target/hadoop-hdds-client-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/tools/target/hadoop-hdds-tools-${HDDS_VERSION}" .
+
+# Ozone
+run copy "${ROOT}/hadoop-ozone/common/target/hadoop-ozone-common-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" .
+
+mkdir -p ./share/hadoop/mapreduce
+mkdir -p ./share/hadoop/yarn
+echo
+echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone-${HDDS_VERSION}"
+echo

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/dev-support/bin/ozone-dist-tar-stitching
----------------------------------------------------------------------
diff --git a/dev-support/bin/ozone-dist-tar-stitching b/dev-support/bin/ozone-dist-tar-stitching
new file mode 100755
index 0000000..decfa23
--- /dev/null
+++ b/dev-support/bin/ozone-dist-tar-stitching
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# project.version
+VERSION=$1
+
+# project.build.directory
+BASEDIR=$2
+
+function run()
+{
+  declare res
+
+  echo "\$ ${*}"
+  "${@}"
+  res=$?
+  if [[ ${res} != 0 ]]; then
+    echo
+    echo "Failed!"
+    echo
+    exit "${res}"
+  fi
+}
+
+#To make the final dist directory easily mountable from docker, we don't use
+#the version name in the directory name.
+#To include the version name in the root directory of the tar file,
+#we create a symbolic link and dereference it during tar creation.
+ln -s -f ozone ozone-${VERSION}
+run tar -c --dereference -f "ozone-${VERSION}.tar" "ozone"
+run gzip -f "ozone-${VERSION}.tar"
+echo
+echo "Ozone dist tar available at: ${BASEDIR}/ozone-${VERSION}.tar.gz"
+echo
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/hadoop-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 43836eb..999d44c 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -13,8 +13,8 @@
   limitations under the License. See accompanying LICENSE file.
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
                       http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
@@ -168,10 +168,13 @@
                 </goals>
                 <configuration>
                   <executable>${shell-executable}</executable>
-                  <workingDirectory>${project.build.directory}</workingDirectory>
+                  <workingDirectory>${project.build.directory}
+                  </workingDirectory>
                   <requiresOnline>false</requiresOnline>
                   <arguments>
-                    <argument>${basedir}/../dev-support/bin/dist-layout-stitching</argument>
+                    <argument>
+                      ${basedir}/../dev-support/bin/dist-layout-stitching
+                    </argument>
                     <argument>${project.version}</argument>
                     <argument>${project.build.directory}</argument>
                     <argument>${hdds.version}</argument>
@@ -182,14 +185,16 @@
                 <id>toolshooks</id>
                 <phase>prepare-package</phase>
                 <goals>
-                    <goal>exec</goal>
+                  <goal>exec</goal>
                 </goals>
                 <configuration>
                   <executable>${shell-executable}</executable>
                   <workingDirectory>${basedir}</workingDirectory>
                   <requiresOnline>false</requiresOnline>
                   <arguments>
-                    <argument>${basedir}/../dev-support/bin/dist-tools-hooks-maker</argument>
+                    <argument>
+                      ${basedir}/../dev-support/bin/dist-tools-hooks-maker
+                    </argument>
                     <argument>${project.version}</argument>
                     <argument>${project.build.directory}</argument>
                     <argument>${basedir}/../hadoop-tools</argument>
@@ -203,14 +208,16 @@
                   <goal>exec</goal>
                 </goals>
                 <configuration>
-                    <executable>${shell-executable}</executable>
-                    <workingDirectory>${project.build.directory}</workingDirectory>
-                    <requiresOnline>false</requiresOnline>
-                    <arguments>
-                      <argument>${basedir}/../dev-support/bin/dist-tar-stitching</argument>
-                      <argument>${project.version}</argument>
-                      <argument>${project.build.directory}</argument>
-                    </arguments>
+                  <executable>${shell-executable}</executable>
+                  <workingDirectory>${project.build.directory}
+                  </workingDirectory>
+                  <requiresOnline>false</requiresOnline>
+                  <arguments>
+                    <argument>${basedir}/../dev-support/bin/dist-tar-stitching
+                    </argument>
+                    <argument>${project.version}</argument>
+                    <argument>${project.build.directory}</argument>
+                  </arguments>
                 </configuration>
               </execution>
             </executions>
@@ -218,14 +225,12 @@
         </plugins>
       </build>
     </profile>
-
     <profile>
       <id>hdds</id>
       <activation>
         <activeByDefault>false</activeByDefault>
       </activation>
       <dependencies>
-
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-ozone-ozone-manager</artifactId>
@@ -261,41 +266,86 @@
         <plugins>
           <plugin>
             <artifactId>maven-resources-plugin</artifactId>
-              <executions>
-                <execution>
-                  <id>copy-docker-compose</id>
-                  <goals>
-                    <goal>copy-resources</goal>
-                  </goals>
-                  <phase>prepare-package</phase>
-                  <configuration>
-                    <outputDirectory>${project.build.directory}/compose</outputDirectory>
-                    <resources>
-                      <resource>
-                        <directory>src/main/compose</directory>
-                        <filtering>true</filtering>
-                      </resource>
-                    </resources>
-                  </configuration>
-                </execution>
-                <execution>
-                  <id>copy-dockerfile</id>
-                  <goals>
-                    <goal>copy-resources</goal>
-                  </goals>
-                  <phase>prepare-package</phase>
-                  <configuration>
-                    <outputDirectory>${project.build.directory}</outputDirectory>
-                    <resources>
-                      <resource>
-                        <directory>src/main/docker</directory>
-                        <filtering>true</filtering>
-                      </resource>
-                    </resources>
-                  </configuration>
-                </execution>
-              </executions>
-            </plugin>
+            <executions>
+              <execution>
+                <id>copy-docker-compose</id>
+                <goals>
+                  <goal>copy-resources</goal>
+                </goals>
+                <phase>prepare-package</phase>
+                <configuration>
+                  <outputDirectory>${project.build.directory}/compose
+                  </outputDirectory>
+                  <resources>
+                    <resource>
+                      <directory>src/main/compose</directory>
+                      <filtering>true</filtering>
+                    </resource>
+                  </resources>
+                </configuration>
+              </execution>
+              <execution>
+                <id>copy-dockerfile</id>
+                <goals>
+                  <goal>copy-resources</goal>
+                </goals>
+                <phase>prepare-package</phase>
+                <configuration>
+                  <outputDirectory>${project.build.directory}</outputDirectory>
+                  <resources>
+                    <resource>
+                      <directory>src/main/docker</directory>
+                      <filtering>true</filtering>
+                    </resource>
+                  </resources>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>exec-maven-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>dist-ozone</id>
+                <phase>prepare-package</phase>
+                <goals>
+                  <goal>exec</goal>
+                </goals>
+                <configuration>
+                  <executable>${shell-executable}</executable>
+                  <workingDirectory>${project.build.directory}
+                  </workingDirectory>
+                  <arguments>
+                    <argument>
+                      ${basedir}/../dev-support/bin/ozone-dist-layout-stitching
+                    </argument>
+                    <argument>${project.version}</argument>
+                    <argument>${project.build.directory}</argument>
+                    <argument>${hdds.version}</argument>
+                  </arguments>
+                </configuration>
+              </execution>
+              <execution>
+                <id>tar-ozone</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>exec</goal>
+                </goals>
+                <configuration>
+                  <executable>${shell-executable}</executable>
+                  <workingDirectory>${project.build.directory}
+                  </workingDirectory>
+                  <arguments>
+                    <argument>${basedir}/../dev-support/bin/ozone-dist-tar-stitching
+                    </argument>
+                    <argument>${hdds.version}</argument>
+                    <argument>${project.build.directory}</argument>
+                  </arguments>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
         </plugins>
       </build>
     </profile>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/hadoop-dist/src/main/compose/ozone/.env
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozone/.env b/hadoop-dist/src/main/compose/ozone/.env
index af20d3e..67eed25 100644
--- a/hadoop-dist/src/main/compose/ozone/.env
+++ b/hadoop-dist/src/main/compose/ozone/.env
@@ -14,4 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-VERSION=${project.version}
\ No newline at end of file
+HDDS_VERSION=${hdds.version}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
index 13a7db6..faf420c 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
@@ -20,7 +20,7 @@ services:
       image: apache/hadoop-runner
       hostname: namenode
       volumes:
-         - ../..//hadoop-${VERSION}:/opt/hadoop
+         - ../../ozone:/opt/hadoop
       ports:
          - 9870:9870
       environment:
@@ -31,7 +31,7 @@ services:
    datanode:
       image: apache/hadoop-runner
       volumes:
-        - ../..//hadoop-${VERSION}:/opt/hadoop
+        - ../../ozone:/opt/hadoop
       ports:
         - 9864
       command: ["/opt/hadoop/bin/ozone","datanode"]
@@ -40,7 +40,7 @@ services:
    ksm:
       image: apache/hadoop-runner
       volumes:
-         - ../..//hadoop-${VERSION}:/opt/hadoop
+         - ../../ozone:/opt/hadoop
       ports:
          - 9874:9874
       environment:
@@ -51,7 +51,7 @@ services:
    scm:
       image: apache/hadoop-runner
       volumes:
-         - ../..//hadoop-${VERSION}:/opt/hadoop
+         - ../../ozone:/opt/hadoop
       ports:
          - 9876:9876
       env_file:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeUtils.java
index 838a8e7..ec1d510 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeUtils.java
@@ -112,7 +112,7 @@ public final class NameNodeUtils {
     }
 
     if (port > 0) {
-      return currentNnAddress;
+       return currentNnAddress;
     } else {
       // the port is missing or 0. Figure out real bind address later.
       return null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/hadoop-ozone/acceptance-test/README.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/README.md b/hadoop-ozone/acceptance-test/README.md
index 3a0ca49..2714e0a 100644
--- a/hadoop-ozone/acceptance-test/README.md
+++ b/hadoop-ozone/acceptance-test/README.md
@@ -20,19 +20,29 @@ This project contains acceptance tests for ozone/hdds using docker-compose and [
 
 To run the acceptance tests, please activate the `ozone-acceptance-test` profile and do a full build.
 
-Typically you need a `mvn install -Phdds,ozone-acceptance-test,dist -DskipTests` for a build without unit tests but with acceptance test.
+```
+mvn clean install -Pdist -Phdds
+cd hadoop-ozone/acceptance-test
+mvn integration-test -Phdds,ozone-acceptance-test,dist -DskipTests
+```
 
 Notes:
 
  1. You need a hadoop build in hadoop-dist/target directory.
  2. The `ozone-acceptance-test` could be activated with profile even if the unit tests are disabled.
-
+ 3. This method does not require the robot framework on the PATH, as Jython is used.
 
 ## Development
 
-You can run manually the robot tests with `robot` cli. (See robotframework docs to install it.)
+You can also run the robot tests manually with the `robot` cli.
+ (See the robotframework docs for installation: http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#installation-instructions)
+
+In the dev-support directory there are two wrapper scripts that run the robot framework with the local
+robot cli instead of calling it from maven.
 
- 1. Go to the `src/test/robotframework`
- 2. Execute `robot -v basedir:${PWD}/../../.. -v VERSION:3.2.0-SNAPSHOT .`
+This is useful while developing the robot files, as any robotframework cli
+arguments can be used.
 
-You can also use select just one test with -t `"*testnamefragment*"`
\ No newline at end of file
+ 1. `dev-support/bin/robot.sh` is the simple wrapper; the .robot file should be passed as an argument.
+ 2. `dev-support/bin/robot-all.sh` calls robot.sh with the main acceptance-test directory,
+ which means all of the acceptance tests will be executed.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh b/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
new file mode 100755
index 0000000..0e212a2
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+$DIR/robot.sh $DIR/../../src/test/robotframework/acceptance
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh b/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh
new file mode 100755
index 0000000..b651f76
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+#basedir is the directory of the whole hadoop project. Used to calculate the
+#exact path to the hadoop-dist project
+BASEDIR=${DIR}/../../../..
+
+if [ ! "$(which robot)" ] ; then
+    echo ""
+    echo "robot is not on your PATH."
+    echo ""
+    echo "Please install it according to the documentation:"
+    echo "    http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#installation-instructions"
+    echo "    (TLDR; most of the time you need: 'pip install robotframework')"
+    exit -1
+fi
+
+OZONEDISTDIR="$BASEDIR/hadoop-dist/target/ozone"
+if [ ! -d "$OZONEDISTDIR" ]; then
+   echo "Ozone can't be found in the $OZONEDISTDIR."
+   echo "You may need a full build with -Phdds and -Pdist profiles"
+   exit -1
+fi
+robot -v basedir:"$BASEDIR" "$@"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/hadoop-ozone/acceptance-test/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/pom.xml b/hadoop-ozone/acceptance-test/pom.xml
index fb6794c..ef45c44 100644
--- a/hadoop-ozone/acceptance-test/pom.xml
+++ b/hadoop-ozone/acceptance-test/pom.xml
@@ -28,32 +28,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <description>Apache Hadoop Ozone Acceptance Tests</description>
   <name>Apache Hadoop Ozone Acceptance Tests</name>
   <packaging>pom</packaging>
-  <build>
-    <plugins>
-      <plugin>
-        <artifactId>maven-resources-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>copy-docker-compose</id>
-            <goals>
-              <goal>copy-resources</goal>
-            </goals>
-            <phase>process-test-resources</phase>
-            <configuration>
-              <outputDirectory>${project.build.directory}/compose
-              </outputDirectory>
-              <resources>
-                <resource>
-                  <directory>src/test/compose</directory>
-                  <filtering>true</filtering>
-                </resource>
-              </resources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
   <profiles>
     <profile>
       <id>ozone-acceptance-test</id>
@@ -70,8 +44,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                 </goals>
                 <configuration>
                   <variables>
-                    <variable>version:${project.version}</variable>
-                    <variable>basedir:${project.basedir}</variable>
+                    <variable>basedir:${project.basedir}/../..</variable>
                   </variables>
                   <skip>false</skip>
                   <skipTests>false</skipTests>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/hadoop-ozone/acceptance-test/src/test/compose/.env
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/.env b/hadoop-ozone/acceptance-test/src/test/compose/.env
index 79f890b..cf22168 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/.env
+++ b/hadoop-ozone/acceptance-test/src/test/compose/.env
@@ -14,4 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-HADOOPDIR=../../hadoop-dist/target/hadoop-${project.version}
\ No newline at end of file
+OZONEDIR=../../../hadoop-dist/target/ozone

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
index da63f84..44bd4a0 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
+++ b/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
@@ -20,7 +20,7 @@ services:
       image: apache/hadoop-runner
       hostname: namenode
       volumes:
-         - ${HADOOPDIR}:/opt/hadoop
+         - ${OZONEDIR}:/opt/hadoop
       ports:
          - 9870
       environment:
@@ -31,7 +31,7 @@ services:
    datanode:
       image: apache/hadoop-runner
       volumes:
-        - ${HADOOPDIR}:/opt/hadoop
+        - ${OZONEDIR}:/opt/hadoop
       ports:
         - 9864
       command: ["/opt/hadoop/bin/ozone","datanode"]
@@ -41,7 +41,7 @@ services:
       image: apache/hadoop-runner
       hostname: ksm
       volumes:
-         - ${HADOOPDIR}:/opt/hadoop
+         - ${OZONEDIR}:/opt/hadoop
       ports:
          - 9874
       environment:
@@ -52,7 +52,7 @@ services:
    scm:
       image: apache/hadoop-runner
       volumes:
-         - ${HADOOPDIR}:/opt/hadoop
+         - ${OZONEDIR}:/opt/hadoop
       ports:
          - 9876
       env_file:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f44b85/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
index 211ec4c..c0e04a8 100644
--- a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
+++ b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
@@ -21,8 +21,7 @@ Suite Teardown      Teardown Ozone Cluster
 
 *** Variables ***
 ${COMMON_REST_HEADER}   -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H  "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root"
-${version}
-
+${basedir}
 *** Test Cases ***
 
 Daemons are running without error
@@ -130,8 +129,8 @@ Execute on
 
 Run docker compose
     [arguments]                     ${command}
-                                    Set Environment Variable    HADOOPDIR                              ${basedir}/../../hadoop-dist/target/hadoop-${version}
-    ${rc}                           ${output} =                 Run And Return Rc And Output           docker-compose -f ${basedir}/target/compose/docker-compose.yaml ${command}
+                                    Set Environment Variable    OZONEDIR                               ${basedir}/hadoop-dist/target/ozone
+    ${rc}                           ${output} =                 Run And Return Rc And Output           docker-compose -f ${basedir}/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml ${command}
     Log                             ${output}
     Should Be Equal As Integers     ${rc}                       0
     [return]                            ${rc}                       ${output}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[23/50] [abbrv] hadoop git commit: HDDS-47. Add acceptance tests for Ozone Shell. Contributed by Lokesh Jain.

Posted by xy...@apache.org.
HDDS-47. Add acceptance tests for Ozone Shell.
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62e6ba9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62e6ba9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62e6ba9b

Branch: refs/heads/HDDS-4
Commit: 62e6ba9bbffc17c6c9aab9c171d5a3710f83f7d3
Parents: 6f809c2
Author: Anu Engineer <ae...@apache.org>
Authored: Fri May 11 10:20:04 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../test/robotframework/acceptance/ozone.robot  | 24 ++++++++++++++++++--
 .../hadoop/ozone/client/OzoneClientUtils.java   |  1 +
 2 files changed, 23 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62e6ba9b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
index 1a9cee7..211ec4c 100644
--- a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
+++ b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
@@ -52,9 +52,29 @@ Test ozone cli
                     Execute on          datanode        ozone oz -createVolume http://ksm/hive -user bilbo -quota 100TB -root
     ${result} =     Execute on          datanode        ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
                     Should contain      ${result}       createdOn
+                    Execute on          datanode        ozone oz -updateVolume http://ksm/hive -user bill -quota 10TB
+    ${result} =     Execute on          datanode        ozone oz -infoVolume http://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
+                    Should Be Equal     ${result}       bill
+    ${result} =     Execute on          datanode        ozone oz -infoVolume http://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
+                    Should Be Equal     ${result}       10
                     Execute on          datanode        ozone oz -createBucket http://ksm/hive/bb1
-    ${result}       Execute on          datanode        ozone oz -listBucket o3://ksm/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+    ${result} =     Execute on          datanode        ozone oz -infoBucket http://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
+                    Should Be Equal     ${result}       DISK
+    ${result} =     Execute on          datanode        ozone oz -updateBucket http://ksm/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
+                    Should Be Equal     ${result}       GROUP
+    ${result} =     Execute on          datanode        ozone oz -updateBucket http://ksm/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
+                    Should Be Equal     ${result}       USER
+    ${result} =     Execute on          datanode        ozone oz -listBucket o3://ksm/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
                     Should Be Equal     ${result}       hive
+                    Execute on          datanode        ozone oz -putKey http://ksm/hive/bb1/key1 -file NOTICE.txt
+                    Execute on          datanode        rm -f NOTICE.txt.1
+                    Execute on          datanode        ozone oz -getKey http://ksm/hive/bb1/key1 -file NOTICE.txt.1
+                    Execute on          datanode        ls -l NOTICE.txt.1
+    ${result} =     Execute on          datanode        ozone oz -infoKey http://ksm/hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
+                    Should contain      ${result}       createdOn
+    ${result} =     Execute on          datanode        ozone oz -listKey o3://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
+                    Should Be Equal     ${result}       key1
+                    Execute on          datanode        ozone oz -deleteKey http://ksm/hive/bb1/key1 -v
                     Execute on          datanode        ozone oz -deleteBucket http://ksm/hive/bb1
                     Execute on          datanode        ozone oz -deleteVolume http://ksm/hive -user bilbo
 
@@ -106,12 +126,12 @@ Scale datanodes up
 Execute on
     [arguments]     ${componentname}    ${command}
     ${rc}           ${return} =         Run docker compose          exec ${componentname} ${command}
-    Log             ${return}
     [return]        ${return}
 
 Run docker compose
     [arguments]                     ${command}
                                     Set Environment Variable    HADOOPDIR                              ${basedir}/../../hadoop-dist/target/hadoop-${version}
     ${rc}                           ${output} =                 Run And Return Rc And Output           docker-compose -f ${basedir}/target/compose/docker-compose.yaml ${command}
+    Log                             ${output}
     Should Be Equal As Integers     ${rc}                       0
     [return]                            ${rc}                       ${output}

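Stripped of the grep/jq assertions, the CLI lifecycle these new test cases walk through looks roughly like the following when executed inside the datanode container (a sketch; the commands and flags are exactly the ones the test invokes above):

    # Volume lifecycle
    ozone oz -createVolume http://ksm/hive -user bilbo -quota 100TB -root
    ozone oz -updateVolume http://ksm/hive -user bill -quota 10TB
    ozone oz -infoVolume   http://ksm/hive
    # Bucket lifecycle, including ACL edits
    ozone oz -createBucket http://ksm/hive/bb1
    ozone oz -updateBucket http://ksm/hive/bb1 -addAcl user:frodo:rw,group:samwise:r
    ozone oz -updateBucket http://ksm/hive/bb1 -removeAcl group:samwise:r
    # Key lifecycle
    ozone oz -putKey    http://ksm/hive/bb1/key1 -file NOTICE.txt
    ozone oz -getKey    http://ksm/hive/bb1/key1 -file NOTICE.txt.1
    ozone oz -infoKey   http://ksm/hive/bb1/key1
    ozone oz -listKey   o3://ksm/hive/bb1
    ozone oz -deleteKey http://ksm/hive/bb1/key1 -v
    # Teardown
    ozone oz -deleteBucket http://ksm/hive/bb1
    ozone oz -deleteVolume http://ksm/hive -user bilbo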
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62e6ba9b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
index 5c83d9b..6be61e2 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
@@ -47,6 +47,7 @@ public final class OzoneClientUtils {
     bucketInfo.setStorageType(bucket.getStorageType());
     bucketInfo.setVersioning(
         OzoneConsts.Versioning.getVersioning(bucket.getVersioning()));
+    bucketInfo.setAcls(bucket.getAcls());
     return bucketInfo;
   }
 


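The one-line OzoneClientUtils change above is what the new ACL assertions depend on: without setAcls(), the BucketInfo JSON emitted by the bucket commands carried no acls field for jq to select. A quick manual check, assuming a running cluster and that -infoBucket goes through the same BucketInfo conversion:

    # Expect a JSON array holding user:frodo:rw after the -addAcl call above
    ozone oz -infoBucket http://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.acls'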
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[35/50] [abbrv] hadoop git commit: Add 2.9.1 release notes and changes documents

Posted by xy...@apache.org.
Add 2.9.1 release notes and changes documents


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e933c1c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e933c1c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e933c1c3

Branch: refs/heads/HDDS-4
Commit: e933c1c3435c968d5bd9be684e29f7c079a08166
Parents: 05d04b5
Author: littlezhou <we...@intel.com>
Authored: Mon May 14 14:24:01 2018 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../markdown/release/2.9.1/CHANGES.2.9.1.md     | 277 ++++++++++++++++
 .../release/2.9.1/RELEASENOTES.2.9.1.md         |  88 ++++++
 .../jdiff/Apache_Hadoop_HDFS_2.9.1.xml          | 312 +++++++++++++++++++
 hadoop-project-dist/pom.xml                     |   2 +-
 4 files changed, 678 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e933c1c3/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
new file mode 100644
index 0000000..c5e53f6
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
@@ -0,0 +1,277 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# "Apache Hadoop" Changelog
+
+## Release 2.9.1 - 2018-04-16
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | RBF: Document Router and State Store metrics |  Major | documentation | Yiqun Lin | Yiqun Lin |
+| [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | RBF: Add ACL support for mount table |  Major | . | Yiqun Lin | Yiqun Lin |
+| [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath |  Major | timelineclient, timelinereader, timelineserver | Vrushali C | Varun Saxena |
+| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store |  Minor | documentation | Yiqun Lin | Yiqun Lin |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix doc error setting up client |  Major | federation | tartarus | tartarus |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-12756](https://issues.apache.org/jira/browse/HADOOP-12756) | Incorporate Aliyun OSS file system implementation |  Major | fs, fs/oss | shimingfei | mingfei.shi |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14872](https://issues.apache.org/jira/browse/HADOOP-14872) | CryptoInputStream should implement unbuffer |  Major | fs, security | John Zhuge | John Zhuge |
+| [HADOOP-14964](https://issues.apache.org/jira/browse/HADOOP-14964) | AliyunOSS: backport Aliyun OSS module to branch-2 |  Major | fs/oss | Genmao Yu | SammiChen |
+| [YARN-6851](https://issues.apache.org/jira/browse/YARN-6851) | Capacity Scheduler: document configs for controlling # containers allowed to be allocated per node heartbeat |  Minor | . | Wei Yan | Wei Yan |
+| [YARN-7495](https://issues.apache.org/jira/browse/YARN-7495) | Improve robustness of the AggregatedLogDeletionService |  Major | log-aggregation | Jonathan Eagles | Jonathan Eagles |
+| [YARN-7611](https://issues.apache.org/jira/browse/YARN-7611) | Node manager web UI should display container type in containers page |  Major | nodemanager, webapp | Weiwei Yang | Weiwei Yang |
+| [HADOOP-15056](https://issues.apache.org/jira/browse/HADOOP-15056) | Fix TestUnbuffer#testUnbufferException failure |  Minor | test | Jack Bearden | Jack Bearden |
+| [HADOOP-15012](https://issues.apache.org/jira/browse/HADOOP-15012) | Add readahead, dropbehind, and unbuffer to StreamCapabilities |  Major | fs | John Zhuge | John Zhuge |
+| [HADOOP-15104](https://issues.apache.org/jira/browse/HADOOP-15104) | AliyunOSS: change the default value of max error retry |  Major | fs/oss | wujinhu | wujinhu |
+| [YARN-7642](https://issues.apache.org/jira/browse/YARN-7642) | Add test case to verify context update after container promotion or demotion with or without auto update |  Minor | nodemanager | Weiwei Yang | Weiwei Yang |
+| [HADOOP-15111](https://issues.apache.org/jira/browse/HADOOP-15111) | AliyunOSS: backport HADOOP-14993 to branch-2 |  Major | fs/oss | Genmao Yu | Genmao Yu |
+| [HDFS-9023](https://issues.apache.org/jira/browse/HDFS-9023) | When NN is not able to identify DN for replication, reason behind it can be logged |  Critical | hdfs-client, namenode | Surendra Singh Lilhore | Xiao Chen |
+| [YARN-7678](https://issues.apache.org/jira/browse/YARN-7678) | Ability to enable logging of container memory stats |  Major | nodemanager | Jim Brennan | Jim Brennan |
+| [HDFS-12945](https://issues.apache.org/jira/browse/HDFS-12945) | Switch to ClientProtocol instead of NamenodeProtocols in NamenodeWebHdfsMethods |  Minor | . | Wei Yan | Wei Yan |
+| [YARN-7590](https://issues.apache.org/jira/browse/YARN-7590) | Improve container-executor validation check |  Major | security, yarn | Eric Yang | Eric Yang |
+| [HADOOP-15189](https://issues.apache.org/jira/browse/HADOOP-15189) | backport HADOOP-15039 to branch-2 and branch-3 |  Blocker | . | Genmao Yu | Genmao Yu |
+| [HADOOP-15212](https://issues.apache.org/jira/browse/HADOOP-15212) | Add independent secret manager method for logging expired tokens |  Major | security | Daryn Sharp | Daryn Sharp |
+| [YARN-7728](https://issues.apache.org/jira/browse/YARN-7728) | Expose container preemptions related information in Capacity Scheduler queue metrics |  Major | . | Eric Payne | Eric Payne |
+| [MAPREDUCE-7048](https://issues.apache.org/jira/browse/MAPREDUCE-7048) | Uber AM can crash due to unknown task in statusUpdate |  Major | mr-am | Peter Bacsko | Peter Bacsko |
+| [HADOOP-13972](https://issues.apache.org/jira/browse/HADOOP-13972) | ADLS to support per-store configuration |  Major | fs/adl | John Zhuge | Sharad Sonker |
+| [YARN-7813](https://issues.apache.org/jira/browse/YARN-7813) | Capacity Scheduler Intra-queue Preemption should be configurable for each queue |  Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
+| [HDFS-11187](https://issues.apache.org/jira/browse/HDFS-11187) | Optimize disk access for last partial chunk checksum of Finalized replica |  Major | datanode | Wei-Chiu Chuang | Gabor Bota |
+| [HADOOP-15279](https://issues.apache.org/jira/browse/HADOOP-15279) | increase maven heap size recommendations |  Minor | build, documentation, test | Allen Wittenauer | Allen Wittenauer |
+| [HDFS-12884](https://issues.apache.org/jira/browse/HDFS-12884) | BlockUnderConstructionFeature.truncateBlock should be of type BlockInfo |  Major | namenode | Konstantin Shvachko | chencan |
+| [HADOOP-15334](https://issues.apache.org/jira/browse/HADOOP-15334) | Upgrade Maven surefire plugin |  Major | build | Arpit Agarwal | Arpit Agarwal |
+| [YARN-7623](https://issues.apache.org/jira/browse/YARN-7623) | Fix the CapacityScheduler Queue configuration documentation |  Major | . | Arun Suresh | Jonathan Hung |
+| [HDFS-13314](https://issues.apache.org/jira/browse/HDFS-13314) | NameNode should optionally exit if it detects FsImage corruption |  Major | namenode | Arpit Agarwal | Arpit Agarwal |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-13723](https://issues.apache.org/jira/browse/HADOOP-13723) | AliyunOSSInputStream#read() should update read bytes stat correctly |  Major | tools | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14045](https://issues.apache.org/jira/browse/HADOOP-14045) | Aliyun OSS documentation missing from website |  Major | documentation, fs/oss | Andrew Wang | Yiqun Lin |
+| [HADOOP-14458](https://issues.apache.org/jira/browse/HADOOP-14458) | Add missing imports to TestAliyunOSSFileSystemContract.java |  Trivial | fs/oss, test | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14466](https://issues.apache.org/jira/browse/HADOOP-14466) | Remove useless document from TestAliyunOSSFileSystemContract.java |  Minor | documentation | Akira Ajisaka | Chen Liang |
+| [HDFS-12318](https://issues.apache.org/jira/browse/HDFS-12318) | Fix IOException condition for openInfo in DFSInputStream |  Major | . | legend | legend |
+| [HDFS-12614](https://issues.apache.org/jira/browse/HDFS-12614) | FSPermissionChecker#getINodeAttrs() throws NPE when INodeAttributesProvider configured |  Major | . | Manoj Govindassamy | Manoj Govindassamy |
+| [HDFS-12788](https://issues.apache.org/jira/browse/HDFS-12788) | Reset the upload button when file upload fails |  Critical | ui, webhdfs | Brahma Reddy Battula | Brahma Reddy Battula |
+| [YARN-7388](https://issues.apache.org/jira/browse/YARN-7388) | TestAMRestart should be scheduler agnostic |  Major | . | Haibo Chen | Haibo Chen |
+| [HDFS-12705](https://issues.apache.org/jira/browse/HDFS-12705) | WebHdfsFileSystem exceptions should retain the caused by exception |  Major | hdfs | Daryn Sharp | Hanisha Koneru |
+| [YARN-7361](https://issues.apache.org/jira/browse/YARN-7361) | Improve the docker container runtime documentation |  Major | . | Shane Kumpf | Shane Kumpf |
+| [YARN-7469](https://issues.apache.org/jira/browse/YARN-7469) | Capacity Scheduler Intra-queue preemption: User can starve if newest app is exactly at user limit |  Major | capacity scheduler, yarn | Eric Payne | Eric Payne |
+| [YARN-7489](https://issues.apache.org/jira/browse/YARN-7489) | ConcurrentModificationException in RMAppImpl#getRMAppMetrics |  Major | capacityscheduler | Tao Yang | Tao Yang |
+| [YARN-7525](https://issues.apache.org/jira/browse/YARN-7525) | Incorrect query parameters in cluster nodes REST API document |  Minor | documentation | Tao Yang | Tao Yang |
+| [HADOOP-15045](https://issues.apache.org/jira/browse/HADOOP-15045) | ISA-L build options are documented in branch-2 |  Major | build, documentation | Akira Ajisaka | Akira Ajisaka |
+| [YARN-7390](https://issues.apache.org/jira/browse/YARN-7390) | All reservation related test cases failed when TestYarnClient runs against Fair Scheduler. |  Major | fairscheduler, reservation system | Yufei Gu | Yufei Gu |
+| [HDFS-12754](https://issues.apache.org/jira/browse/HDFS-12754) | Lease renewal can hit a deadlock |  Major | . | Kuhu Shukla | Kuhu Shukla |
+| [HDFS-12832](https://issues.apache.org/jira/browse/HDFS-12832) | INode.getFullPathName may throw ArrayIndexOutOfBoundsException lead to NameNode exit |  Critical | namenode | DENG FEI | Konstantin Shvachko |
+| [HDFS-11754](https://issues.apache.org/jira/browse/HDFS-11754) | Make FsServerDefaults cache configurable. |  Minor | . | Rushabh S Shah | Mikhail Erofeev |
+| [YARN-7509](https://issues.apache.org/jira/browse/YARN-7509) | AsyncScheduleThread and ResourceCommitterService are still running after RM is transitioned to standby |  Critical | . | Tao Yang | Tao Yang |
+| [YARN-7558](https://issues.apache.org/jira/browse/YARN-7558) | "yarn logs" command fails to get logs for running containers if UI authentication is enabled. |  Critical | . | Namit Maheshwari | Xuan Gong |
+| [HDFS-12638](https://issues.apache.org/jira/browse/HDFS-12638) | Delete copy-on-truncate block along with the original block, when deleting a file being truncated |  Blocker | hdfs | Jiandan Yang | Konstantin Shvachko |
+| [MAPREDUCE-5124](https://issues.apache.org/jira/browse/MAPREDUCE-5124) | AM lacks flow control for task events |  Major | mr-am | Jason Lowe | Peter Bacsko |
+| [YARN-7455](https://issues.apache.org/jira/browse/YARN-7455) | quote\_and\_append\_arg can overflow buffer |  Major | nodemanager | Jason Lowe | Jim Brennan |
+| [HADOOP-14985](https://issues.apache.org/jira/browse/HADOOP-14985) | Remove subversion related code from VersionInfoMojo.java |  Minor | build | Akira Ajisaka | Ajay Kumar |
+| [HDFS-12889](https://issues.apache.org/jira/browse/HDFS-12889) | Router UI is missing robots.txt file |  Major | . | Bharat Viswanadham | Bharat Viswanadham |
+| [HDFS-11576](https://issues.apache.org/jira/browse/HDFS-11576) | Block recovery will fail indefinitely if recovery time \> heartbeat interval |  Critical | datanode, hdfs, namenode | Lukas Majercak | Lukas Majercak |
+| [YARN-7607](https://issues.apache.org/jira/browse/YARN-7607) | Remove the trailing duplicated timestamp in container diagnostics message |  Minor | nodemanager | Weiwei Yang | Weiwei Yang |
+| [HADOOP-15080](https://issues.apache.org/jira/browse/HADOOP-15080) | Aliyun OSS: update oss sdk from 2.8.1 to 2.8.3 to remove its dependency on Cat-x "json-lib" |  Blocker | fs/oss | Chris Douglas | SammiChen |
+| [YARN-7591](https://issues.apache.org/jira/browse/YARN-7591) | NPE in async-scheduling mode of CapacityScheduler |  Critical | capacityscheduler | Tao Yang | Tao Yang |
+| [YARN-7608](https://issues.apache.org/jira/browse/YARN-7608) | Incorrect sTarget column causing DataTable warning on RM application and scheduler web page |  Major | resourcemanager, webapp | Weiwei Yang | Gergely Novák |
+| [HDFS-12833](https://issues.apache.org/jira/browse/HDFS-12833) | Distcp : Update the usage of delete option for dependency with update and overwrite option |  Minor | distcp, hdfs | Harshakiran Reddy | usharani |
+| [YARN-7647](https://issues.apache.org/jira/browse/YARN-7647) | NM print inappropriate error log when node-labels is enabled |  Minor | . | Yang Wang | Yang Wang |
+| [HDFS-12907](https://issues.apache.org/jira/browse/HDFS-12907) | Allow read-only access to reserved raw for non-superusers |  Major | namenode | Daryn Sharp | Rushabh S Shah |
+| [HDFS-12881](https://issues.apache.org/jira/browse/HDFS-12881) | Output streams closed with IOUtils suppressing write errors |  Major | . | Jason Lowe | Ajay Kumar |
+| [YARN-7595](https://issues.apache.org/jira/browse/YARN-7595) | Container launching code suppresses close exceptions after writes |  Major | nodemanager | Jason Lowe | Jim Brennan |
+| [HADOOP-15085](https://issues.apache.org/jira/browse/HADOOP-15085) | Output streams closed with IOUtils suppressing write errors |  Major | . | Jason Lowe | Jim Brennan |
+| [YARN-7661](https://issues.apache.org/jira/browse/YARN-7661) | NodeManager metrics return wrong value after update node resource |  Major | . | Yang Wang | Yang Wang |
+| [HDFS-12347](https://issues.apache.org/jira/browse/HDFS-12347) | TestBalancerRPCDelay#testBalancerRPCDelay fails very frequently |  Critical | test | Xiao Chen | Bharat Viswanadham |
+| [YARN-7542](https://issues.apache.org/jira/browse/YARN-7542) | Fix issue that causes some Running Opportunistic Containers to be recovered as PAUSED |  Major | . | Arun Suresh | Sampada Dehankar |
+| [HADOOP-15143](https://issues.apache.org/jira/browse/HADOOP-15143) | NPE due to Invalid KerberosTicket in UGI |  Major | . | Jitendra Nath Pandey | Mukul Kumar Singh |
+| [YARN-7692](https://issues.apache.org/jira/browse/YARN-7692) | Skip validating priority acls while recovering applications |  Blocker | resourcemanager | Charan Hebri | Sunil G |
+| [MAPREDUCE-7028](https://issues.apache.org/jira/browse/MAPREDUCE-7028) | Concurrent task progress updates causing NPE in Application Master |  Blocker | mr-am | Gergo Repas | Gergo Repas |
+| [YARN-7619](https://issues.apache.org/jira/browse/YARN-7619) | Max AM Resource value in Capacity Scheduler UI has to be refreshed for every user |  Major | capacity scheduler, yarn | Eric Payne | Eric Payne |
+| [YARN-7699](https://issues.apache.org/jira/browse/YARN-7699) | queueUsagePercentage is coming as INF for getApp REST api call |  Major | webapp | Sunil G | Sunil G |
+| [YARN-7508](https://issues.apache.org/jira/browse/YARN-7508) | NPE in FiCaSchedulerApp when debug log enabled in async-scheduling mode |  Major | capacityscheduler | Tao Yang | Tao Yang |
+| [YARN-7663](https://issues.apache.org/jira/browse/YARN-7663) | RMAppImpl:Invalid event: START at KILLED |  Minor | resourcemanager | lujie | lujie |
+| [YARN-6948](https://issues.apache.org/jira/browse/YARN-6948) | Invalid event: ATTEMPT\_ADDED at FINAL\_SAVING |  Minor | yarn | lujie | lujie |
+| [YARN-7735](https://issues.apache.org/jira/browse/YARN-7735) | Fix typo in YARN documentation |  Minor | documentation | Takanobu Asanuma | Takanobu Asanuma |
+| [YARN-7727](https://issues.apache.org/jira/browse/YARN-7727) | Incorrect log levels in few logs with QueuePriorityContainerCandidateSelector |  Minor | yarn | Prabhu Joseph | Prabhu Joseph |
+| [HDFS-11915](https://issues.apache.org/jira/browse/HDFS-11915) | Sync rbw dir on the first hsync() to avoid file lost on power failure |  Critical | . | Kanaka Kumar Avvaru | Vinayakumar B |
+| [HDFS-9049](https://issues.apache.org/jira/browse/HDFS-9049) | Make Datanode Netty reverse proxy port to be configurable |  Major | datanode | Vinayakumar B | Vinayakumar B |
+| [HADOOP-15150](https://issues.apache.org/jira/browse/HADOOP-15150) | in FsShell, UGI params should be overidden through env vars(-D arg) |  Major | . | Brahma Reddy Battula | Brahma Reddy Battula |
+| [HADOOP-15181](https://issues.apache.org/jira/browse/HADOOP-15181) | Typo in SecureMode.md |  Trivial | documentation | Masahiro Tanaka | Masahiro Tanaka |
+| [YARN-7737](https://issues.apache.org/jira/browse/YARN-7737) | prelaunch.err file not found exception on container failure |  Major | . | Jonathan Hung | Keqiu Hu |
+| [HDFS-13063](https://issues.apache.org/jira/browse/HDFS-13063) | Fix the incorrect spelling in HDFSHighAvailabilityWithQJM.md |  Trivial | documentation | Jianfei Jiang | Jianfei Jiang |
+| [YARN-7102](https://issues.apache.org/jira/browse/YARN-7102) | NM heartbeat stuck when responseId overflows MAX\_INT |  Critical | . | Botong Huang | Botong Huang |
+| [HADOOP-15151](https://issues.apache.org/jira/browse/HADOOP-15151) | MapFile.fix creates a wrong index file in case of block-compressed data file. |  Major | common | Grigori Rybkine | Grigori Rybkine |
+| [MAPREDUCE-7020](https://issues.apache.org/jira/browse/MAPREDUCE-7020) | Task timeout in uber mode can crash AM |  Major | mr-am | Akira Ajisaka | Peter Bacsko |
+| [YARN-7698](https://issues.apache.org/jira/browse/YARN-7698) | A misleading variable's name in ApplicationAttemptEventDispatcher |  Minor | resourcemanager | Jinjiang Ling | Jinjiang Ling |
+| [HDFS-13100](https://issues.apache.org/jira/browse/HDFS-13100) | Handle IllegalArgumentException when GETSERVERDEFAULTS is not implemented in webhdfs. |  Critical | hdfs, webhdfs | Yongjun Zhang | Yongjun Zhang |
+| [YARN-6868](https://issues.apache.org/jira/browse/YARN-6868) | Add test scope to certain entries in hadoop-yarn-server-resourcemanager pom.xml |  Major | yarn | Ray Chiang | Ray Chiang |
+| [YARN-7849](https://issues.apache.org/jira/browse/YARN-7849) | TestMiniYarnClusterNodeUtilization#testUpdateNodeUtilization fails due to heartbeat sync error |  Major | test | Jason Lowe | Botong Huang |
+| [YARN-7801](https://issues.apache.org/jira/browse/YARN-7801) | AmFilterInitializer should addFilter after fill all parameters |  Critical | . | Sumana Sathish | Wangda Tan |
+| [YARN-7890](https://issues.apache.org/jira/browse/YARN-7890) | NPE during container relaunch |  Major | . | Billie Rinaldi | Jason Lowe |
+| [HDFS-12935](https://issues.apache.org/jira/browse/HDFS-12935) | Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up |  Major | tools | Jianfei Jiang | Jianfei Jiang |
+| [HDFS-13120](https://issues.apache.org/jira/browse/HDFS-13120) | Snapshot diff could be corrupted after concat |  Major | namenode, snapshots | Xiaoyu Yao | Xiaoyu Yao |
+| [HDFS-10453](https://issues.apache.org/jira/browse/HDFS-10453) | ReplicationMonitor thread could stuck for long time due to the race between replication and delete of same file in a large cluster. |  Major | namenode | He Xiaoqiao | He Xiaoqiao |
+| [HDFS-8693](https://issues.apache.org/jira/browse/HDFS-8693) | refreshNamenodes does not support adding a new standby to a running DN |  Critical | datanode, ha | Jian Fang | Ajith S |
+| [MAPREDUCE-7052](https://issues.apache.org/jira/browse/MAPREDUCE-7052) | TestFixedLengthInputFormat#testFormatCompressedIn is flaky |  Major | client, test | Peter Bacsko | Peter Bacsko |
+| [HDFS-13112](https://issues.apache.org/jira/browse/HDFS-13112) | Token expiration edits may cause log corruption or deadlock |  Critical | namenode | Daryn Sharp | Daryn Sharp |
+| [MAPREDUCE-7053](https://issues.apache.org/jira/browse/MAPREDUCE-7053) | Timed out tasks can fail to produce thread dump |  Major | . | Jason Lowe | Jason Lowe |
+| [HADOOP-15206](https://issues.apache.org/jira/browse/HADOOP-15206) | BZip2 drops and duplicates records when input split size is small |  Major | . | Aki Tanaka | Aki Tanaka |
+| [YARN-7947](https://issues.apache.org/jira/browse/YARN-7947) | Capacity Scheduler intra-queue preemption can NPE for non-schedulable apps |  Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
+| [YARN-7945](https://issues.apache.org/jira/browse/YARN-7945) | Java Doc error in UnmanagedAMPoolManager for branch-2 |  Major | . | Rohith Sharma K S | Botong Huang |
+| [HADOOP-14903](https://issues.apache.org/jira/browse/HADOOP-14903) | Add json-smart explicitly to pom.xml |  Major | common | Ray Chiang | Ray Chiang |
+| [HDFS-12781](https://issues.apache.org/jira/browse/HDFS-12781) | After Datanode down, In Namenode UI Datanode tab is throwing warning message. |  Major | datanode | Harshakiran Reddy | Brahma Reddy Battula |
+| [HDFS-12070](https://issues.apache.org/jira/browse/HDFS-12070) | Failed block recovery leaves files open indefinitely and at risk for data loss |  Major | . | Daryn Sharp | Kihwal Lee |
+| [HADOOP-15251](https://issues.apache.org/jira/browse/HADOOP-15251) | Backport HADOOP-13514 (surefire upgrade) to branch-2 |  Major | test | Chris Douglas | Chris Douglas |
+| [HADOOP-15275](https://issues.apache.org/jira/browse/HADOOP-15275) | Incorrect javadoc for return type of RetryPolicy#shouldRetry |  Minor | documentation | Nanda kumar | Nanda kumar |
+| [YARN-7511](https://issues.apache.org/jira/browse/YARN-7511) | NPE in ContainerLocalizer when localization failed for running container |  Major | nodemanager | Tao Yang | Tao Yang |
+| [MAPREDUCE-7023](https://issues.apache.org/jira/browse/MAPREDUCE-7023) | TestHadoopArchiveLogs.testCheckFilesAndSeedApps fails on rerun |  Minor | test | Gergely Novák | Gergely Novák |
+| [HADOOP-15283](https://issues.apache.org/jira/browse/HADOOP-15283) | Upgrade from findbugs 3.0.1 to spotbugs 3.1.2 in branch-2 to fix docker image build |  Major | . | Xiao Chen | Akira Ajisaka |
+| [YARN-7736](https://issues.apache.org/jira/browse/YARN-7736) | Fix itemization in YARN federation document |  Minor | documentation | Akira Ajisaka | Sen Zhao |
+| [HDFS-13164](https://issues.apache.org/jira/browse/HDFS-13164) | File not closed if streamer fail with DSQuotaExceededException |  Major | hdfs-client | Xiao Chen | Xiao Chen |
+| [HDFS-13109](https://issues.apache.org/jira/browse/HDFS-13109) | Support fully qualified hdfs path in EZ commands |  Major | hdfs | Hanisha Koneru | Hanisha Koneru |
+| [MAPREDUCE-6930](https://issues.apache.org/jira/browse/MAPREDUCE-6930) | mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml |  Major | mrv2 | Daniel Templeton | Sen Zhao |
+| [HDFS-12156](https://issues.apache.org/jira/browse/HDFS-12156) | TestFSImage fails without -Pnative |  Major | test | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-15308](https://issues.apache.org/jira/browse/HADOOP-15308) | TestConfiguration fails on Windows because of paths |  Major | . | Íñigo Goiri | Xiao Liang |
+| [YARN-7636](https://issues.apache.org/jira/browse/YARN-7636) | Re-reservation count may overflow when cluster resource exhausted for a long time |  Major | capacityscheduler | Tao Yang | Tao Yang |
+| [HDFS-12886](https://issues.apache.org/jira/browse/HDFS-12886) | Ignore minReplication for block recovery |  Major | hdfs, namenode | Lukas Majercak | Lukas Majercak |
+| [HDFS-13296](https://issues.apache.org/jira/browse/HDFS-13296) | GenericTestUtils generates paths with drive letter in Windows and fail webhdfs related test cases |  Major | . | Xiao Liang | Xiao Liang |
+| [HDFS-13268](https://issues.apache.org/jira/browse/HDFS-13268) | TestWebHdfsFileContextMainOperations fails on Windows |  Major | . | Íñigo Goiri | Xiao Liang |
+| [YARN-8054](https://issues.apache.org/jira/browse/YARN-8054) | Improve robustness of the LocalDirsHandlerService MonitoringTimerTask thread |  Major | . | Jonathan Eagles | Jonathan Eagles |
+| [YARN-7873](https://issues.apache.org/jira/browse/YARN-7873) | Revert YARN-6078 |  Blocker | . | Billie Rinaldi | Billie Rinaldi |
+| [HDFS-13195](https://issues.apache.org/jira/browse/HDFS-13195) | DataNode conf page  cannot display the current value after reconfig |  Minor | datanode | maobaolong | maobaolong |
+| [HADOOP-15320](https://issues.apache.org/jira/browse/HADOOP-15320) | Remove customized getFileBlockLocations for hadoop-azure and hadoop-azure-datalake |  Major | fs/adl, fs/azure | shanyu zhao | shanyu zhao |
+| [HADOOP-12862](https://issues.apache.org/jira/browse/HADOOP-12862) | LDAP Group Mapping over SSL can not specify trust store |  Major | . | Wei-Chiu Chuang | Wei-Chiu Chuang |
+| [HDFS-13427](https://issues.apache.org/jira/browse/HDFS-13427) | Fix the section titles of transparent encryption document |  Minor | documentation | Akira Ajisaka | Akira Ajisaka |
+
+
+### TESTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14696](https://issues.apache.org/jira/browse/HADOOP-14696) | parallel tests don't work for Windows |  Minor | test | Allen Wittenauer | Allen Wittenauer |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-13481](https://issues.apache.org/jira/browse/HADOOP-13481) | User end documents for Aliyun OSS FileSystem |  Minor | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-13591](https://issues.apache.org/jira/browse/HADOOP-13591) | Unit test failure in TestOSSContractGetFileStatus and TestOSSContractRootDir |  Major | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-13624](https://issues.apache.org/jira/browse/HADOOP-13624) | Rename TestAliyunOSSContractDispCp |  Major | fs, fs/oss | Kai Zheng | Genmao Yu |
+| [HADOOP-14065](https://issues.apache.org/jira/browse/HADOOP-14065) | AliyunOSS: oss directory filestatus should use meta time |  Major | fs/oss | Fei Hui | Fei Hui |
+| [HADOOP-13768](https://issues.apache.org/jira/browse/HADOOP-13768) | AliyunOSS: handle the failure in the batch delete operation `deleteDirs`. |  Major | fs | Genmao Yu | Genmao Yu |
+| [HADOOP-14069](https://issues.apache.org/jira/browse/HADOOP-14069) | AliyunOSS: listStatus returns wrong file info |  Major | fs/oss | Fei Hui | Fei Hui |
+| [HADOOP-13769](https://issues.apache.org/jira/browse/HADOOP-13769) | AliyunOSS: update oss sdk version |  Major | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-14072](https://issues.apache.org/jira/browse/HADOOP-14072) | AliyunOSS: Failed to read from stream when seek beyond the download size |  Major | fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-14192](https://issues.apache.org/jira/browse/HADOOP-14192) | Aliyun OSS FileSystem contract test should implement getTestBaseDir() |  Major | fs/oss | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14194](https://issues.apache.org/jira/browse/HADOOP-14194) | Aliyun OSS should not use empty endpoint as default |  Major | fs/oss | Mingliang Liu | Genmao Yu |
+| [HADOOP-14787](https://issues.apache.org/jira/browse/HADOOP-14787) | AliyunOSS: Implement the `createNonRecursive` operator |  Major | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-14649](https://issues.apache.org/jira/browse/HADOOP-14649) | Update aliyun-sdk-oss version to 2.8.1 |  Major | fs/oss | Ray Chiang | Genmao Yu |
+| [HADOOP-14799](https://issues.apache.org/jira/browse/HADOOP-14799) | Update nimbus-jose-jwt to 4.41.1 |  Major | . | Ray Chiang | Ray Chiang |
+| [HADOOP-14997](https://issues.apache.org/jira/browse/HADOOP-14997) |  Add hadoop-aliyun as dependency of hadoop-cloud-storage |  Minor | fs/oss | Genmao Yu | Genmao Yu |
+| [HDFS-12801](https://issues.apache.org/jira/browse/HDFS-12801) | RBF: Set MountTableResolver as default file resolver |  Minor | . | Íñigo Goiri | Íñigo Goiri |
+| [YARN-7430](https://issues.apache.org/jira/browse/YARN-7430) | Enable user re-mapping for Docker containers by default |  Blocker | security, yarn | Eric Yang | Eric Yang |
+| [YARN-6128](https://issues.apache.org/jira/browse/YARN-6128) | Add support for AMRMProxy HA |  Major | amrmproxy, nodemanager | Subru Krishnan | Botong Huang |
+| [HADOOP-15024](https://issues.apache.org/jira/browse/HADOOP-15024) | AliyunOSS: support user agent configuration and include that & Hadoop version information to oss server |  Major | fs, fs/oss | SammiChen | SammiChen |
+| [HDFS-12858](https://issues.apache.org/jira/browse/HDFS-12858) | RBF: Add router admin commands usage in HDFS commands reference doc |  Minor | documentation | Yiqun Lin | Yiqun Lin |
+| [HDFS-12835](https://issues.apache.org/jira/browse/HDFS-12835) | RBF: Fix Javadoc parameter errors |  Minor | . | Wei Yan | Wei Yan |
+| [YARN-7587](https://issues.apache.org/jira/browse/YARN-7587) | Skip dispatching opportunistic containers to nodes whose queue is already full |  Major | . | Weiwei Yang | Weiwei Yang |
+| [HDFS-12396](https://issues.apache.org/jira/browse/HDFS-12396) | Webhdfs file system should get delegation token from kms provider. |  Major | encryption, kms, webhdfs | Rushabh S Shah | Rushabh S Shah |
+| [YARN-6704](https://issues.apache.org/jira/browse/YARN-6704) | Add support for work preserving NM restart when FederationInterceptor is enabled in AMRMProxyService |  Major | . | Botong Huang | Botong Huang |
+| [HDFS-12875](https://issues.apache.org/jira/browse/HDFS-12875) | RBF: Complete logic for -readonly option of dfsrouteradmin add command |  Major | . | Yiqun Lin | Íñigo Goiri |
+| [YARN-7630](https://issues.apache.org/jira/browse/YARN-7630) | Fix AMRMToken rollover handling in AMRMProxy |  Minor | . | Botong Huang | Botong Huang |
+| [HDFS-12937](https://issues.apache.org/jira/browse/HDFS-12937) | RBF: Add more unit tests for router admin commands |  Major | test | Yiqun Lin | Yiqun Lin |
+| [HDFS-12988](https://issues.apache.org/jira/browse/HDFS-12988) | RBF: Mount table entries not properly updated in the local cache |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-15156](https://issues.apache.org/jira/browse/HADOOP-15156) | backport HADOOP-15086 rename fix to branch-2 |  Major | fs/azure | Thomas Marquardt | Thomas Marquardt |
+| [YARN-7716](https://issues.apache.org/jira/browse/YARN-7716) | metricsTimeStart and metricsTimeEnd should be all lower case in the doc |  Major | timelinereader | Haibo Chen | Haibo Chen |
+| [HDFS-12802](https://issues.apache.org/jira/browse/HDFS-12802) | RBF: Control MountTableResolver cache size |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-15027](https://issues.apache.org/jira/browse/HADOOP-15027) | AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance |  Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-13028](https://issues.apache.org/jira/browse/HDFS-13028) | RBF: Fix spurious TestRouterRpc#testProxyGetStats |  Minor | . | Íñigo Goiri | Íñigo Goiri |
+| [YARN-5094](https://issues.apache.org/jira/browse/YARN-5094) | some YARN container events have timestamp of -1 |  Critical | . | Sangjin Lee | Haibo Chen |
+| [YARN-7782](https://issues.apache.org/jira/browse/YARN-7782) | Enable user re-mapping for Docker containers in yarn-default.xml |  Blocker | security, yarn | Eric Yang | Eric Yang |
+| [HDFS-12772](https://issues.apache.org/jira/browse/HDFS-12772) | RBF: Federation Router State State Store internal API |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13042](https://issues.apache.org/jira/browse/HDFS-13042) | RBF: Heartbeat Router State |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13049](https://issues.apache.org/jira/browse/HDFS-13049) | RBF: Inconsistent Router OPTS config in branch-2 and branch-3 |  Minor | . | Wei Yan | Wei Yan |
+| [HDFS-12574](https://issues.apache.org/jira/browse/HDFS-12574) | Add CryptoInputStream to WebHdfsFileSystem read call. |  Major | encryption, kms, webhdfs | Rushabh S Shah | Rushabh S Shah |
+| [HDFS-13044](https://issues.apache.org/jira/browse/HDFS-13044) | RBF: Add a safe mode for the Router |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13043](https://issues.apache.org/jira/browse/HDFS-13043) | RBF: Expose the state of the Routers in the federation |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13068](https://issues.apache.org/jira/browse/HDFS-13068) | RBF: Add router admin option to manage safe mode |  Major | . | Íñigo Goiri | Yiqun Lin |
+| [HDFS-13119](https://issues.apache.org/jira/browse/HDFS-13119) | RBF: Manage unavailable clusters |  Major | . | Íñigo Goiri | Yiqun Lin |
+| [HDFS-13187](https://issues.apache.org/jira/browse/HDFS-13187) | RBF: Fix Routers information shown in the web UI |  Minor | . | Wei Yan | Wei Yan |
+| [HDFS-13184](https://issues.apache.org/jira/browse/HDFS-13184) | RBF: Improve the unit test TestRouterRPCClientRetries |  Minor | test | Yiqun Lin | Yiqun Lin |
+| [HDFS-13199](https://issues.apache.org/jira/browse/HDFS-13199) | RBF: Fix the hdfs router page missing label icon issue |  Major | federation, hdfs | maobaolong | maobaolong |
+| [HDFS-13214](https://issues.apache.org/jira/browse/HDFS-13214) | RBF: Complete document of Router configuration |  Major | . | Tao Jie | Yiqun Lin |
+| [HDFS-13230](https://issues.apache.org/jira/browse/HDFS-13230) | RBF: ConnectionManager's cleanup task will compare each pool's own active conns with its total conns |  Minor | . | Wei Yan | Chao Sun |
+| [HDFS-13233](https://issues.apache.org/jira/browse/HDFS-13233) | RBF: MountTableResolver doesn't return the correct mount point of the given path |  Major | hdfs | wangzhiyuan | wangzhiyuan |
+| [HDFS-13212](https://issues.apache.org/jira/browse/HDFS-13212) | RBF: Fix router location cache issue |  Major | federation, hdfs | Weiwei Wu | Weiwei Wu |
+| [HDFS-13232](https://issues.apache.org/jira/browse/HDFS-13232) | RBF: ConnectionPool should return first usable connection |  Minor | . | Wei Yan | Ekanth S |
+| [HDFS-13240](https://issues.apache.org/jira/browse/HDFS-13240) | RBF: Update some inaccurate document descriptions |  Minor | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-11399](https://issues.apache.org/jira/browse/HDFS-11399) | Many tests fails in Windows due to injecting disk failures |  Major | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-13241](https://issues.apache.org/jira/browse/HDFS-13241) | RBF: TestRouterSafemode failed if the port 8888 is in use |  Major | hdfs, test | maobaolong | maobaolong |
+| [HDFS-13253](https://issues.apache.org/jira/browse/HDFS-13253) | RBF: Quota management incorrect parent-child relationship judgement |  Major | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-13226](https://issues.apache.org/jira/browse/HDFS-13226) | RBF: Throw the exception if mount table entry validated failed |  Major | hdfs | maobaolong | maobaolong |
+| [HDFS-12773](https://issues.apache.org/jira/browse/HDFS-12773) | RBF: Improve State Store FS implementation |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13198](https://issues.apache.org/jira/browse/HDFS-13198) | RBF: RouterHeartbeatService throws out CachedStateStore related exceptions when starting router |  Minor | . | Wei Yan | Wei Yan |
+| [HDFS-13224](https://issues.apache.org/jira/browse/HDFS-13224) | RBF: Resolvers to support mount points across multiple subclusters |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-15262](https://issues.apache.org/jira/browse/HADOOP-15262) | AliyunOSS: move files under a directory in parallel when rename a directory |  Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-13215](https://issues.apache.org/jira/browse/HDFS-13215) | RBF: Move Router to its own module |  Major | . | Íñigo Goiri | Wei Yan |
+| [HDFS-13250](https://issues.apache.org/jira/browse/HDFS-13250) | RBF: Router to manage requests across multiple subclusters |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13318](https://issues.apache.org/jira/browse/HDFS-13318) | RBF: Fix FindBugs in hadoop-hdfs-rbf |  Minor | . | Íñigo Goiri | Ekanth S |
+| [HDFS-12792](https://issues.apache.org/jira/browse/HDFS-12792) | RBF: Test Router-based federation using HDFSContract |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-12512](https://issues.apache.org/jira/browse/HDFS-12512) | RBF: Add WebHDFS |  Major | fs | Íñigo Goiri | Wei Yan |
+| [HDFS-13291](https://issues.apache.org/jira/browse/HDFS-13291) | RBF: Implement available space based OrderResolver |  Major | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-13204](https://issues.apache.org/jira/browse/HDFS-13204) | RBF: Optimize name service safe mode icon |  Minor | . | liuhongtong | liuhongtong |
+| [HDFS-13352](https://issues.apache.org/jira/browse/HDFS-13352) | RBF: Add xsl stylesheet for hdfs-rbf-default.xml |  Major | documentation | Takanobu Asanuma | Takanobu Asanuma |
+| [YARN-8010](https://issues.apache.org/jira/browse/YARN-8010) | Add config in FederationRMFailoverProxy to not bypass facade cache when failing over |  Minor | . | Botong Huang | Botong Huang |
+| [HDFS-13347](https://issues.apache.org/jira/browse/HDFS-13347) | RBF: Cache datanode reports |  Minor | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13289](https://issues.apache.org/jira/browse/HDFS-13289) | RBF: TestConnectionManager#testCleanup() test case need correction |  Minor | . | Dibyendu Karmakar | Dibyendu Karmakar |
+| [HDFS-13364](https://issues.apache.org/jira/browse/HDFS-13364) | RBF: Support NamenodeProtocol in the Router |  Major | . | ĆĆ±igo Goiri | ĆĆ±igo Goiri |
+| [HADOOP-14651](https://issues.apache.org/jira/browse/HADOOP-14651) | Update okhttp version to 2.7.5 |  Major | fs/adl | Ray Chiang | Ray Chiang |
+| [HADOOP-14999](https://issues.apache.org/jira/browse/HADOOP-14999) | AliyunOSS: provide one asynchronous multi-part based uploading mechanism |  Major | fs/oss | Genmao Yu | Genmao Yu |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-15149](https://issues.apache.org/jira/browse/HADOOP-15149) | CryptoOutputStream should implement StreamCapabilities |  Major | fs | Mike Drob | Xiao Chen |
+| [YARN-7691](https://issues.apache.org/jira/browse/YARN-7691) | Add Unit Tests for ContainersLauncher |  Major | . | Sampada Dehankar | Sampada Dehankar |
+| [HADOOP-15177](https://issues.apache.org/jira/browse/HADOOP-15177) | Update the release year to 2018 |  Blocker | build | Akira Ajisaka | Bharat Viswanadham |
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e933c1c3/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
new file mode 100644
index 0000000..bed70b1
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
@@ -0,0 +1,88 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# "Apache Hadoop"  2.9.1 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-12756](https://issues.apache.org/jira/browse/HADOOP-12756) | *Major* | **Incorporate Aliyun OSS file system implementation**
+
+Aliyun OSS is widely used among China's cloud users, and this work implemented a new Hadoop-compatible filesystem, AliyunOSSFileSystem, with the oss scheme, similar to the s3a and azure support.
+
+
+---
+
+* [HADOOP-14964](https://issues.apache.org/jira/browse/HADOOP-14964) | *Major* | **AliyunOSS: backport Aliyun OSS module to branch-2**
+
+Aliyun OSS is widely used among China's cloud users, and this work implemented a new Hadoop-compatible filesystem, AliyunOSSFileSystem, with the oss:// scheme, similar to the s3a and azure support.
+
+
+---
+
+* [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | *Major* | **RBF: Document Router and State Store metrics**
+
+This JIRA makes the following change:
+Change the Router metrics context from 'router' to 'dfs'.
+
+
+---
+
+* [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | *Major* | **RBF: Add ACL support for mount table**
+
+Mount tables now support ACLs. Users will not be able to modify their own pre-existing entries, because mount table entries created before this change (which carried no permissions) are assigned owner:superuser, group:supergroup, permission:755 as the default permissions. The fix is to log in as the superuser to modify these mount table entries.
+
+
+---
+
+* [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | *Major* | **Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath**
+
+Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath.
+
+
+---
+
+* [HADOOP-15156](https://issues.apache.org/jira/browse/HADOOP-15156) | *Major* | **backport HADOOP-15086 rename fix to branch-2**
+
+[WASB] Fix Azure implementation of Filesystem.rename to ensure that at most one operation succeeds when there are multiple, concurrent rename operations targeting the same destination file.
+
+
+---
+
+* [HADOOP-15027](https://issues.apache.org/jira/browse/HADOOP-15027) | *Major* | **AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance**
+
+Support multi-thread pre-read in AliyunOSSInputStream to improve the sequential read performance from Hadoop to Aliyun OSS.
+
+
+---
+
+* [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | *Major* | **RBF: Fix doc error setting up client**
+
+Fix the documentation error in setting up HDFS Router-based Federation.
+
+
+---
+
+* [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | *Minor* | **RBF: Use the ZooKeeper as the default State Store**
+
+Change the default State Store from the local file to ZooKeeper. This requires an additional ZooKeeper address to be configured.
+
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e933c1c3/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
new file mode 100644
index 0000000..a5d87c7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
@@ -0,0 +1,312 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Mon Apr 16 12:03:07 UTC 2018 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop HDFS 2.9.1"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.9.1.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.9.1.jar:/maven/org/slf4j/slf4j-api/1.7.25/slf4j-api-1.7.25.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/4.41.1/nimbus-jose-jwt-4.41.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/1.3.1/json-smart-1.3.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/jline/jline/0.9.94/jline-0.9.94.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.9.1.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/org/apache/commons/commons-lang3/3.4/commons-lang3-3.4.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.5/snappy-java-1.0.5.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/org/codehaus/woodstox/stax2-api/3.1.4/stax2-api-3.1.4.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.0.3/woodstox-core-5.0.3.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-2.9.1.jar:/maven/com/squareup/okhttp/okhttp/2.7.5/okhttp-2.7.5.jar:/maven/com/squareup/okio/okio/1.6.0/okio-1.6.0.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/slf4j/slf4j-log4j12/1.7.25/slf4j-log4j12-1.7.25.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/io/netty/netty-all/4.0.23.Final/netty-all-4.0.23.Final.jar:/maven/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/maven/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/maven/org/apache/htrace/htrace-core4/4.1.0-incubating/htrace-core4-4.1.0-incubating.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.7.8/jackson-databind-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.7.8/jackson-annotations-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.7.8/jackson-core-2.7.8.jar -sourcepath /build/source/hadoop-hdfs-project/hadoop-hdfs/src/main/java -apidir /build/source/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 2.9.1 -->
+<package name="org.apache.hadoop.hdfs">
+  <doc>
+  <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
+Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files 
+have strictly one writer at any one time.  Bytes are always appended 
+to the end of the writer's stream.  There is no notion of "record appends"
+or "mutations" that are then checked or reordered.  Writers simply emit 
+a byte stream.  That byte stream is guaranteed to be stored in the 
+order written.</p>]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.net">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
+</package>
+<package name="org.apache.hadoop.hdfs.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.client">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.server">
+  <!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+  <interface name="JournalNodeMXBean"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getJournalsStatus" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get status information (e.g., whether formatted) of JournalNode's journals.
+ 
+ @return A string presenting status for each journal]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the JMX management interface for JournalNode information]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.block">
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.delegation">
+</package>
+<package name="org.apache.hadoop.hdfs.server.balancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.blockmanagement">
+</package>
+<package name="org.apache.hadoop.hdfs.server.common">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
+</package>
+<package name="org.apache.hadoop.hdfs.server.mover">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode">
+  <!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <interface name="AuditLogger"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Called during initialization of the logger.
+
+ @param conf The configuration object.]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <doc>
+      <![CDATA[Called to log an audit event.
+ <p>
+ This method must return as quickly as possible, since it's called
+ in a critical section of the NameNode's operation.
+
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's
+             metadata (permissions, owner, times, etc).]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface defining an audit logger.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <class name="HdfsAuditLogger" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
+    <constructor name="HdfsAuditLogger"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+    </method>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String, String,
+ FileStatus)} with additional parameters related to logging delegation token
+ tracking IDs.
+ 
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's metadata
+          (permissions, owner, times, etc).
+ @param callerContext Context information of the caller
+ @param ugi UserGroupInformation of the current user, or null if not logging
+          token tracking information
+ @param dtSecretManager The token secret manager, or null if not logging
+          token tracking information]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String,
+ String, FileStatus, CallerContext, UserGroupInformation,
+ DelegationTokenSecretManager)} without {@link CallerContext} information.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Extension of {@link AuditLogger}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+  <class name="INodeAttributeProvider" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="INodeAttributeProvider"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="start"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Initialize the provider. This method is called at NameNode startup
+ time.]]>
+      </doc>
+    </method>
+    <method name="stop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Shutdown the provider. This method is called at NameNode shutdown time.]]>
+      </doc>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fullPath" type="java.lang.String"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathElements" type="java.lang.String[]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="components" type="byte[][]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
+      <doc>
+      <![CDATA[Can be over-ridden by implementations to provide a custom Access Control
+ Enforcer that can provide an alternate implementation of the
+ default permission checking logic.
+ @param defaultEnforcer The Default AccessControlEnforcer
+ @return The AccessControlEnforcer to use]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.ha">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
+</package>
+<package name="org.apache.hadoop.hdfs.server.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.tools">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.util">
+</package>
+<package name="org.apache.hadoop.hdfs.web">
+</package>
+<package name="org.apache.hadoop.hdfs.web.resources">
+</package>
+
+</api>
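
The AuditLogger interface in the listing above is the extension point for pluggable NameNode audit logging. Below is a minimal sketch of an implementation, assuming only the two methods shown in the listing; the class name, the queue, and the tab-separated record format are illustrative and not part of Hadoop.

    import java.net.InetAddress;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

    // Hypothetical implementation: hands each audit event to a queue so
    // that logAuditEvent returns quickly -- per the javadoc above, it is
    // called in a critical section of the NameNode's operation.
    public class QueueingAuditLogger implements AuditLogger {
      private final BlockingQueue<String> events =
          new LinkedBlockingQueue<>(10000);

      @Override
      public void initialize(Configuration conf) {
        // Read implementation-specific keys from conf here if needed.
      }

      @Override
      public void logAuditEvent(boolean succeeded, String userName,
          InetAddress addr, String cmd, String src, String dst,
          FileStatus stat) {
        // offer() never blocks; if the queue is full the event is dropped,
        // a deliberate trade-off in this sketch to keep the caller fast.
        events.offer(succeeded + "\t" + userName + "\t" + addr + "\t" + cmd
            + "\t" + src + "\t" + dst);
      }
    }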

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e933c1c3/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index cfaa698..5f83da3 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -145,7 +145,7 @@
         <activeByDefault>false</activeByDefault>
       </activation>
       <properties>
-        <jdiff.stable.api>3.0.2</jdiff.stable.api>
+        <jdiff.stable.api>2.9.1</jdiff.stable.api>
         <jdiff.stability>-unstable</jdiff.stability>
         <!-- Commented out for HADOOP-11776 -->
         <!-- Uncomment param name="${jdiff.compatibility}" in javadoc doclet if compatibility is not empty -->




[49/50] [abbrv] hadoop git commit: HDFS-13544. Improve logging for JournalNode in federated cluster.

Posted by xy...@apache.org.
HDFS-13544. Improve logging for JournalNode in federated cluster.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f821a71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f821a71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f821a71

Branch: refs/heads/HDDS-4
Commit: 5f821a713895529adeab9959df4d5929448dd98a
Parents: e8d7a99
Author: Hanisha Koneru <ha...@apache.org>
Authored: Mon May 14 10:12:08 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:10 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/qjournal/server/Journal.java    | 115 +++++++++++--------
 1 file changed, 64 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f821a71/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 408ce76..452664a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -208,11 +208,12 @@ public class Journal implements Closeable {
     while (!files.isEmpty()) {
       EditLogFile latestLog = files.remove(files.size() - 1);
       latestLog.scanLog(Long.MAX_VALUE, false);
-      LOG.info("Latest log is " + latestLog);
+      LOG.info("Latest log is " + latestLog + " ; journal id: " + journalId);
       if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
         // the log contains no transactions
         LOG.warn("Latest log " + latestLog + " has no transactions. " +
-            "moving it aside and looking for previous log");
+            "moving it aside and looking for previous log"
+            + " ; journal id: " + journalId);
         latestLog.moveAsideEmptyFile();
       } else {
         return latestLog;
@@ -230,7 +231,7 @@ public class Journal implements Closeable {
     Preconditions.checkState(nsInfo.getNamespaceID() != 0,
         "can't format with uninitialized namespace info: %s",
         nsInfo);
-    LOG.info("Formatting " + this + " with namespace info: " +
+    LOG.info("Formatting journal id : " + journalId + " with namespace info: " +
         nsInfo);
     storage.format(nsInfo);
     refreshCachedData();
@@ -323,7 +324,7 @@ public class Journal implements Closeable {
     // any other that we've promised. 
     if (epoch <= getLastPromisedEpoch()) {
       throw new IOException("Proposed epoch " + epoch + " <= last promise " +
-          getLastPromisedEpoch());
+          getLastPromisedEpoch() + " ; journal id: " + journalId);
     }
     
     updateLastPromisedEpoch(epoch);
@@ -343,7 +344,8 @@ public class Journal implements Closeable {
 
   private void updateLastPromisedEpoch(long newEpoch) throws IOException {
     LOG.info("Updating lastPromisedEpoch from " + lastPromisedEpoch.get() +
-        " to " + newEpoch + " for client " + Server.getRemoteIp());
+        " to " + newEpoch + " for client " + Server.getRemoteIp() +
+        " ; journal id: " + journalId);
     lastPromisedEpoch.set(newEpoch);
     
     // Since we have a new writer, reset the IPC serial - it will start
@@ -378,7 +380,7 @@ public class Journal implements Closeable {
     }
 
     checkSync(curSegment != null,
-        "Can't write, no segment open");
+        "Can't write, no segment open" + " ; journal id: " + journalId);
 
     if (curSegmentTxId != segmentTxId) {
       // Sanity check: it is possible that the writer will fail IPCs
@@ -389,17 +391,20 @@ public class Journal implements Closeable {
       // and throw an exception.
       JournalOutOfSyncException e = new JournalOutOfSyncException(
           "Writer out of sync: it thinks it is writing segment " + segmentTxId
-          + " but current segment is " + curSegmentTxId);
+              + " but current segment is " + curSegmentTxId
+              + " ; journal id: " + journalId);
       abortCurSegment();
       throw e;
     }
       
     checkSync(nextTxId == firstTxnId,
-        "Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId);
+        "Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId
+            + " ; journal id: " + journalId);
     
     long lastTxnId = firstTxnId + numTxns - 1;
     if (LOG.isTraceEnabled()) {
-      LOG.trace("Writing txid " + firstTxnId + "-" + lastTxnId);
+      LOG.trace("Writing txid " + firstTxnId + "-" + lastTxnId +
+          " ; journal id: " + journalId);
     }
 
     // If the edit has already been marked as committed, we know
@@ -423,7 +428,7 @@ public class Journal implements Closeable {
 
     if (milliSeconds > WARN_SYNC_MILLIS_THRESHOLD) {
       LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId +
-               " took " + milliSeconds + "ms");
+               " took " + milliSeconds + "ms" + " ; journal id: " + journalId);
     }
 
     if (isLagging) {
@@ -455,7 +460,7 @@ public class Journal implements Closeable {
     if (reqInfo.getEpoch() < lastPromisedEpoch.get()) {
       throw new IOException("IPC's epoch " + reqInfo.getEpoch() +
           " is less than the last promised epoch " +
-          lastPromisedEpoch.get());
+          lastPromisedEpoch.get() + " ; journal id: " + journalId);
     } else if (reqInfo.getEpoch() > lastPromisedEpoch.get()) {
       // A newer client has arrived. Fence any previous writers by updating
       // the promise.
@@ -465,16 +470,16 @@ public class Journal implements Closeable {
     // Ensure that the IPCs are arriving in-order as expected.
     checkSync(reqInfo.getIpcSerialNumber() > currentEpochIpcSerial,
         "IPC serial %s from client %s was not higher than prior highest " +
-        "IPC serial %s", reqInfo.getIpcSerialNumber(),
-        Server.getRemoteIp(),
-        currentEpochIpcSerial);
+        "IPC serial %s ; journal id: %s", reqInfo.getIpcSerialNumber(),
+        Server.getRemoteIp(), currentEpochIpcSerial, journalId);
     currentEpochIpcSerial = reqInfo.getIpcSerialNumber();
 
     if (reqInfo.hasCommittedTxId()) {
       Preconditions.checkArgument(
           reqInfo.getCommittedTxId() >= committedTxnId.get(),
           "Client trying to move committed txid backward from " +
-          committedTxnId.get() + " to " + reqInfo.getCommittedTxId());
+          committedTxnId.get() + " to " + reqInfo.getCommittedTxId() +
+              " ; journal id: " + journalId);
       
       committedTxnId.set(reqInfo.getCommittedTxId());
     }
@@ -486,7 +491,7 @@ public class Journal implements Closeable {
     if (reqInfo.getEpoch() != lastWriterEpoch.get()) {
       throw new IOException("IPC's epoch " + reqInfo.getEpoch() +
           " is not the current writer epoch  " +
-          lastWriterEpoch.get());
+          lastWriterEpoch.get() + " ; journal id: " + journalId);
     }
   }
   
@@ -497,7 +502,8 @@ public class Journal implements Closeable {
   private void checkFormatted() throws JournalNotFormattedException {
     if (!isFormatted()) {
       throw new JournalNotFormattedException("Journal " +
-          storage.getSingularStorageDir() + " not formatted");
+          storage.getSingularStorageDir() + " not formatted" +
+          " ; journal id: " + journalId);
     }
   }
 
@@ -542,7 +548,8 @@ public class Journal implements Closeable {
     if (curSegment != null) {
       LOG.warn("Client is requesting a new log segment " + txid + 
           " though we are already writing " + curSegment + ". " +
-          "Aborting the current segment in order to begin the new one.");
+          "Aborting the current segment in order to begin the new one." +
+          " ; journal id: " + journalId);
       // The writer may have lost a connection to us and is now
       // re-connecting after the connection came back.
       // We should abort our own old segment.
@@ -556,7 +563,7 @@ public class Journal implements Closeable {
     if (existing != null) {
       if (!existing.isInProgress()) {
         throw new IllegalStateException("Already have a finalized segment " +
-            existing + " beginning at " + txid);
+            existing + " beginning at " + txid + " ; journal id: " + journalId);
       }
       
       // If it's in-progress, it should only contain one transaction,
@@ -565,7 +572,8 @@ public class Journal implements Closeable {
       existing.scanLog(Long.MAX_VALUE, false);
       if (existing.getLastTxId() != existing.getFirstTxId()) {
         throw new IllegalStateException("The log file " +
-            existing + " seems to contain valid transactions");
+            existing + " seems to contain valid transactions" +
+            " ; journal id: " + journalId);
       }
     }
     
@@ -573,7 +581,7 @@ public class Journal implements Closeable {
     if (curLastWriterEpoch != reqInfo.getEpoch()) {
       LOG.info("Updating lastWriterEpoch from " + curLastWriterEpoch +
           " to " + reqInfo.getEpoch() + " for client " +
-          Server.getRemoteIp());
+          Server.getRemoteIp() + " ; journal id: " + journalId);
       lastWriterEpoch.set(reqInfo.getEpoch());
     }
 
@@ -608,8 +616,8 @@ public class Journal implements Closeable {
       
       checkSync(nextTxId == endTxId + 1,
           "Trying to finalize in-progress log segment %s to end at " +
-          "txid %s but only written up to txid %s",
-          startTxId, endTxId, nextTxId - 1);
+          "txid %s but only written up to txid %s ; journal id: %s",
+          startTxId, endTxId, nextTxId - 1, journalId);
       // No need to validate the edit log if the client is finalizing
       // the log segment that it was just writing to.
       needsValidation = false;
@@ -618,25 +626,27 @@ public class Journal implements Closeable {
     FileJournalManager.EditLogFile elf = fjm.getLogFile(startTxId);
     if (elf == null) {
       throw new JournalOutOfSyncException("No log file to finalize at " +
-          "transaction ID " + startTxId);
+          "transaction ID " + startTxId + " ; journal id: " + journalId);
     }
 
     if (elf.isInProgress()) {
       if (needsValidation) {
         LOG.info("Validating log segment " + elf.getFile() + " about to be " +
-            "finalized");
+            "finalized ; journal id: " + journalId);
         elf.scanLog(Long.MAX_VALUE, false);
   
         checkSync(elf.getLastTxId() == endTxId,
             "Trying to finalize in-progress log segment %s to end at " +
-            "txid %s but log %s on disk only contains up to txid %s",
-            startTxId, endTxId, elf.getFile(), elf.getLastTxId());
+            "txid %s but log %s on disk only contains up to txid %s " +
+            "; journal id: %s",
+            startTxId, endTxId, elf.getFile(), elf.getLastTxId(), journalId);
       }
       fjm.finalizeLogSegment(startTxId, endTxId);
     } else {
       Preconditions.checkArgument(endTxId == elf.getLastTxId(),
           "Trying to re-finalize already finalized log " +
-              elf + " with different endTxId " + endTxId);
+              elf + " with different endTxId " + endTxId +
+              " ; journal id: " + journalId);
     }
 
     // Once logs are finalized, a different length will never be decided.
@@ -667,7 +677,8 @@ public class Journal implements Closeable {
     File paxosFile = storage.getPaxosFile(segmentTxId);
     if (paxosFile.exists()) {
       if (!paxosFile.delete()) {
-        throw new IOException("Unable to delete paxos file " + paxosFile);
+        throw new IOException("Unable to delete paxos file " + paxosFile +
+            " ; journal id: " + journalId);
       }
     }
   }
@@ -717,7 +728,7 @@ public class Journal implements Closeable {
     }
     if (elf.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
       LOG.info("Edit log file " + elf + " appears to be empty. " +
-          "Moving it aside...");
+          "Moving it aside..." + " ; journal id: " + journalId);
       elf.moveAsideEmptyFile();
       return null;
     }
@@ -727,7 +738,7 @@ public class Journal implements Closeable {
         .setIsInProgress(elf.isInProgress())
         .build();
     LOG.info("getSegmentInfo(" + segmentTxId + "): " + elf + " -> " +
-        TextFormat.shortDebugString(ret));
+        TextFormat.shortDebugString(ret) + " ; journal id: " + journalId);
     return ret;
   }
 
@@ -771,7 +782,7 @@ public class Journal implements Closeable {
     
     PrepareRecoveryResponseProto resp = builder.build();
     LOG.info("Prepared recovery for segment " + segmentTxId + ": " +
-        TextFormat.shortDebugString(resp));
+        TextFormat.shortDebugString(resp) + " ; journal id: " + journalId);
     return resp;
   }
   
@@ -792,8 +803,8 @@ public class Journal implements Closeable {
     // at least one transaction.
     Preconditions.checkArgument(segment.getEndTxId() > 0 &&
         segment.getEndTxId() >= segmentTxId,
-        "bad recovery state for segment %s: %s",
-        segmentTxId, TextFormat.shortDebugString(segment));
+        "bad recovery state for segment %s: %s ; journal id: %s",
+        segmentTxId, TextFormat.shortDebugString(segment), journalId);
     
     PersistedRecoveryPaxosData oldData = getPersistedPaxosData(segmentTxId);
     PersistedRecoveryPaxosData newData = PersistedRecoveryPaxosData.newBuilder()
@@ -806,8 +817,9 @@ public class Journal implements Closeable {
     // checkRequest() call above should filter non-increasing epoch numbers.
     if (oldData != null) {
       alwaysAssert(oldData.getAcceptedInEpoch() <= reqInfo.getEpoch(),
-          "Bad paxos transition, out-of-order epochs.\nOld: %s\nNew: %s\n",
-          oldData, newData);
+          "Bad paxos transition, out-of-order epochs.\nOld: %s\nNew: " +
+              "%s\nJournalId: %s\n",
+          oldData, newData, journalId);
     }
     
     File syncedFile = null;
@@ -817,7 +829,7 @@ public class Journal implements Closeable {
         currentSegment.getEndTxId() != segment.getEndTxId()) {
       if (currentSegment == null) {
         LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) +
-            ": no current segment in place");
+            ": no current segment in place ; journal id: " + journalId);
         
         // Update the highest txid for lag metrics
         updateHighestWrittenTxId(Math.max(segment.getEndTxId(),
@@ -825,7 +837,7 @@ public class Journal implements Closeable {
       } else {
         LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) +
             ": old segment " + TextFormat.shortDebugString(currentSegment) +
-            " is not the right length");
+            " is not the right length ; journal id: " + journalId);
         
         // Paranoid sanity check: if the new log is shorter than the log we
         // currently have, we should not end up discarding any transactions
@@ -838,14 +850,15 @@ public class Journal implements Closeable {
               " with new segment " +
               TextFormat.shortDebugString(segment) + 
               ": would discard already-committed txn " +
-              committedTxnId.get());
+              committedTxnId.get() +
+              " ; journal id: " + journalId);
         }
         
         // Another paranoid check: we should not be asked to synchronize a log
         // on top of a finalized segment.
         alwaysAssert(currentSegment.getIsInProgress(),
-            "Should never be asked to synchronize a different log on top of an " +
-            "already-finalized segment");
+            "Should never be asked to synchronize a different log on top of " +
+            "an already-finalized segment ; journal id: " + journalId);
         
         // If we're shortening the log, update our highest txid
         // used for lag metrics.
@@ -858,7 +871,7 @@ public class Journal implements Closeable {
     } else {
       LOG.info("Skipping download of log " +
           TextFormat.shortDebugString(segment) +
-          ": already have up-to-date logs");
+          ": already have up-to-date logs ; journal id: " + journalId);
     }
     
     // This is one of the few places in the protocol where we have a single
@@ -890,12 +903,12 @@ public class Journal implements Closeable {
     }
 
     LOG.info("Accepted recovery for segment " + segmentTxId + ": " +
-        TextFormat.shortDebugString(newData));
+        TextFormat.shortDebugString(newData) + " ; journal id: " + journalId);
   }
 
   private LongRange txnRange(SegmentStateProto seg) {
     Preconditions.checkArgument(seg.hasEndTxId(),
-        "invalid segment: %s", seg);
+        "invalid segment: %s ; journal id: %s", seg, journalId);
     return new LongRange(seg.getStartTxId(), seg.getEndTxId());
   }
 
@@ -970,7 +983,7 @@ public class Journal implements Closeable {
     if (tmp.exists()) {
       File dst = storage.getInProgressEditLog(segmentId);
       LOG.info("Rolling forward previously half-completed synchronization: " +
-          tmp + " -> " + dst);
+          tmp + " -> " + dst + " ; journal id: " + journalId);
       FileUtil.replaceFile(tmp, dst);
     }
   }
@@ -991,8 +1004,8 @@ public class Journal implements Closeable {
       PersistedRecoveryPaxosData ret = PersistedRecoveryPaxosData.parseDelimitedFrom(in);
       Preconditions.checkState(ret != null &&
           ret.getSegmentState().getStartTxId() == segmentTxId,
-          "Bad persisted data for segment %s: %s",
-          segmentTxId, ret);
+          "Bad persisted data for segment %s: %s ; journal id: %s",
+          segmentTxId, ret, journalId);
       return ret;
     } finally {
       IOUtils.closeStream(in);
@@ -1041,7 +1054,7 @@ public class Journal implements Closeable {
     storage.cTime = sInfo.cTime;
     int oldLV = storage.getLayoutVersion();
     storage.layoutVersion = sInfo.layoutVersion;
-    LOG.info("Starting upgrade of edits directory: "
+    LOG.info("Starting upgrade of edits directory: " + storage.getRoot()
         + ".\n   old LV = " + oldLV
         + "; old CTime = " + oldCTime
         + ".\n   new LV = " + storage.getLayoutVersion()
@@ -1112,7 +1125,7 @@ public class Journal implements Closeable {
     if (endTxId <= committedTxnId.get()) {
       if (!finalFile.getParentFile().exists()) {
         LOG.error(finalFile.getParentFile() + " doesn't exist. Aborting tmp " +
-            "segment move to current directory");
+            "segment move to current directory ; journal id: " + journalId);
         return false;
       }
       Files.move(tmpFile.toPath(), finalFile.toPath(),
@@ -1122,13 +1135,13 @@ public class Journal implements Closeable {
       } else {
         success = false;
         LOG.warn("Unable to move edits file from " + tmpFile + " to " +
-            finalFile);
+            finalFile + " ; journal id: " + journalId);
       }
     } else {
       success = false;
       LOG.error("The endTxId of the temporary file is not less than the " +
           "last committed transaction id. Aborting move to final file" +
-          finalFile);
+          finalFile + " ; journal id: " + journalId);
     }
 
     return success;
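
Every hunk above applies the same change: log messages and exception texts raised by Journal now end with the owning journal id, so that on a JournalNode serving several namespaces in a federated cluster each line can be attributed to the right journal. A sketch of the pattern as a helper method follows; the helper is illustrative only, since the committed patch inlines the suffix at each call site.

    // Illustrative only -- the actual patch appends the suffix by hand
    // at every LOG call and thrown exception rather than via a helper.
    private String withJournalId(String msg) {
      return msg + " ; journal id: " + journalId;
    }

    // Example call site:
    LOG.info(withJournalId("Latest log is " + latestLog));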




[14/50] [abbrv] hadoop git commit: HDDS-31. Fix TestSCMCli. Contributed by Lokesh Jain.

Posted by xy...@apache.org.
HDDS-31. Fix TestSCMCli. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e229dcbd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e229dcbd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e229dcbd

Branch: refs/heads/HDDS-4
Commit: e229dcbd8d0d32c6487faaf17dcc42fdebef4e82
Parents: 757bc8a
Author: Anu Engineer <ae...@apache.org>
Authored: Thu May 10 12:43:13 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:08 2018 -0700

----------------------------------------------------------------------
 .../container/common/helpers/ContainerInfo.java | 26 +++++++
 .../cli/container/CreateContainerHandler.java   |  8 +-
 .../scm/cli/container/ListContainerHandler.java |  8 +-
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java | 80 ++++++++++----------
 4 files changed, 74 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e229dcbd/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
index 0bd4c26..10fd96c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
@@ -18,6 +18,10 @@
 
 package org.apache.hadoop.hdds.scm.container.common.helpers;
 
+import com.fasterxml.jackson.annotation.JsonAutoDetect;
+import com.fasterxml.jackson.annotation.PropertyAccessor;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
@@ -25,6 +29,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.util.Time;
 
+import java.io.IOException;
 import java.util.Comparator;
 
 /**
@@ -32,6 +37,17 @@ import java.util.Comparator;
  */
 public class ContainerInfo
     implements Comparator<ContainerInfo>, Comparable<ContainerInfo> {
+
+  private static final ObjectWriter WRITER;
+
+  static {
+    ObjectMapper mapper = new ObjectMapper();
+    mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);
+    mapper
+        .setVisibility(PropertyAccessor.GETTER, JsonAutoDetect.Visibility.NONE);
+    WRITER = mapper.writer();
+  }
+
   private HddsProtos.LifeCycleState state;
   private Pipeline pipeline;
   // Bytes allocated by SCM for clients.
@@ -255,6 +271,16 @@ public class ContainerInfo
   }
 
   /**
+   * Returns a JSON string of this object.
+   *
+   * @return String - json string
+   * @throws IOException
+   */
+  public String toJsonString() throws IOException {
+    return WRITER.writeValueAsString(this);
+  }
+
+  /**
    * Builder class for ContainerInfo.
    */
   public static class Builder {
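
The static ObjectWriter added above is configured to serialize fields directly (FIELD visibility ANY) and to ignore getters (GETTER visibility NONE), so toJsonString() reflects stored state rather than derived values. Here is a self-contained demo of that visibility setup; the demo class and its values are made up, and only the two setVisibility calls mirror the patch.

    import com.fasterxml.jackson.annotation.JsonAutoDetect;
    import com.fasterxml.jackson.annotation.PropertyAccessor;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class VisibilityDemo {
      private final long containerID = 42;   // private field: still serialized
      public String getSummary() {           // getter: ignored by the writer
        return "container-" + containerID;
      }

      public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        mapper.setVisibility(PropertyAccessor.FIELD,
            JsonAutoDetect.Visibility.ANY);
        mapper.setVisibility(PropertyAccessor.GETTER,
            JsonAutoDetect.Visibility.NONE);
        // Prints {"containerID":42}; the getter-derived value never appears.
        System.out.println(mapper.writer().writeValueAsString(new VisibilityDemo()));
      }
    }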

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e229dcbd/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
index c0ff1f7..278ee30 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
@@ -44,6 +44,10 @@ public class CreateContainerHandler extends OzoneCommandHandler {
 
   @Override
   public void execute(CommandLine cmd) throws IOException {
+    if (cmd.hasOption(HELP_OP)) {
+      displayHelp();
+    }
+
     if (!cmd.hasOption(CONTAINER_CREATE)) {
       throw new IOException("Expecting container create");
     }
@@ -57,7 +61,7 @@ public class CreateContainerHandler extends OzoneCommandHandler {
   public void displayHelp() {
     Options options = new Options();
     HelpFormatter helpFormatter = new HelpFormatter();
-    helpFormatter.printHelp(CMD_WIDTH, "hdfs scm -container -create <option>",
-        "where <option> is", options, "");
+    helpFormatter.printHelp(CMD_WIDTH, "hdfs scm -container -create",
+        null, options, null);
   }
 }
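
For reference, the printHelp overload used here is commons-cli's five-argument form (width, cmdLineSyntax, header, options, footer). Passing null for header and footer with an empty Options trims the output to essentially just the usage line, which appears to be what the updated expected help text in TestSCMCli later in this patch asserts. A standalone sketch, with illustrative values:

    import org.apache.commons.cli.HelpFormatter;
    import org.apache.commons.cli.Options;

    public class HelpDemo {
      public static void main(String[] args) {
        HelpFormatter hf = new HelpFormatter();
        // Prints roughly: usage: hdfs scm -container -create
        hf.printHelp(80, "hdfs scm -container -create", null,
            new Options(), null);
      }
    }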

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e229dcbd/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java
index 42dae65..3483b3e 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java
@@ -24,7 +24,6 @@ import org.apache.commons.cli.Options;
 import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 
 import java.io.IOException;
@@ -87,14 +86,15 @@ public class ListContainerHandler extends OzoneCommandHandler {
 
     // Output data list
     for (ContainerInfo container : containerList) {
-      outputContainerPipeline(container.getPipeline());
+      outputContainerInfo(container);
     }
   }
 
-  private void outputContainerPipeline(Pipeline pipeline) throws IOException {
+  private void outputContainerInfo(ContainerInfo containerInfo)
+      throws IOException {
     // Print container report info.
     logOut("%s", JsonUtils.toJsonStringWithDefaultPrettyPrinter(
-        pipeline.toJsonString()));
+        containerInfo.toJsonString()));
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e229dcbd/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
index 888b72e..2d8577c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
@@ -55,7 +55,6 @@ import static org.apache.hadoop.hdds.scm.cli.ResultCode.EXECUTION_ERROR;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 import static org.junit.Assert.assertFalse;
 /**
  * This class tests the CLI of SCM.
@@ -126,21 +125,12 @@ public class TestSCMCli {
 
   @Test
   public void testCreateContainer() throws Exception {
-    long containerID = ContainerTestHelper.getTestContainerID();
-    try {
-      scm.getClientProtocolServer().getContainer(containerID);
-      fail("should not be able to get the container");
-    } catch (IOException ioe) {
-      assertTrue(ioe.getMessage().contains(
-          "Specified key does not exist. key : " + containerID));
-    }
-    String[] args = {"-container", "-create", "-c",
-        Long.toString(containerID)};
+    ByteArrayOutputStream testContent = new ByteArrayOutputStream();
+    PrintStream testPrintOut = new PrintStream(testContent);
+    System.setOut(testPrintOut);
+    String[] args = {"-container", "-create"};
     assertEquals(ResultCode.SUCCESS, cli.run(args));
-    ContainerInfo container = scm.getClientProtocolServer()
-        .getContainer(containerID);
-    assertNotNull(container);
-    assertEquals(containerID, container.containerID());
+    assertEquals("", testContent.toString());
   }
 
   private boolean containerExist(long containerID) {
@@ -215,9 +205,9 @@ public class TestSCMCli {
     ContainerInfo emptyContainer = containerOperationClient
         .createContainer(xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, containerOwner);
-    containerOperationClient.closeContainer(container.getContainerID(),
+    containerOperationClient.closeContainer(emptyContainer.getContainerID(),
         container.getPipeline());
-    Assert.assertTrue(containerExist(container.getContainerID()));
+    Assert.assertTrue(containerExist(emptyContainer.getContainerID()));
 
     // Successfully delete an empty container.
     delCmd = new String[] {"-container", "-delete", "-c",
@@ -252,7 +242,7 @@ public class TestSCMCli {
     DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
         .getDatanodeDetails();
     String formatStr =
-        "Container Name: %s\n" +
+        "Container id: %s\n" +
         "Container State: %s\n" +
         "Container DB Path: %s\n" +
         "Container Path: %s\n" +
@@ -261,7 +251,7 @@ public class TestSCMCli {
         "Datanodes: [%s]\n";
 
     String formatStrWithHash =
-        "Container Name: %s\n" +
+        "Container id: %s\n" +
         "Container State: %s\n" +
         "Container Hash: %s\n" +
         "Container DB Path: %s\n" +
@@ -271,8 +261,9 @@ public class TestSCMCli {
         "Datanodes: [%s]\n";
 
     // Test a non-exist container
-    String cname = "nonExistContainer";
-    String[] info = {"-container", "-info", cname};
+    String containerID =
+        Long.toString(ContainerTestHelper.getTestContainerID());
+    String[] info = { "-container", "-info", containerID };
     int exitCode = runCommandAndGetOutput(info, null, null);
     assertEquals("Expected Execution Error, Did not find that.",
         EXECUTION_ERROR, exitCode);
@@ -286,14 +277,16 @@ public class TestSCMCli {
             readContainer(container.getContainerID(),
                 container.getPipeline()), conf);
 
-    info = new String[]{"-container", "-info", "-c", cname};
+    info = new String[] { "-container", "-info", "-c",
+        Long.toString(container.getContainerID()) };
     ByteArrayOutputStream out = new ByteArrayOutputStream();
     exitCode = runCommandAndGetOutput(info, out, null);
     assertEquals("Expected Success, did not find it.", ResultCode.SUCCESS,
             exitCode);
 
     String openStatus = data.isOpen() ? "OPEN" : "CLOSED";
-    String expected = String.format(formatStr, cname, openStatus,
+    String expected =
+        String.format(formatStr, container.getContainerID(), openStatus,
         data.getDBPath(), data.getContainerPath(), "",
         datanodeDetails.getHostName(), datanodeDetails.getHostName());
     assertEquals(expected, out.toString());
@@ -307,14 +300,16 @@ public class TestSCMCli {
     data = ContainerData
         .getFromProtBuf(containerOperationClient.readContainer(
             container.getContainerID(), container.getPipeline()), conf);
-    KeyUtils.getDB(data, conf).put(cname.getBytes(), "someKey".getBytes());
+    KeyUtils.getDB(data, conf)
+        .put(containerID.getBytes(), "someKey".getBytes());
 
-    info = new String[]{"-container", "-info", "-c", cname};
+    info = new String[] { "-container", "-info", "-c",
+        Long.toString(container.getContainerID()) };
     exitCode = runCommandAndGetOutput(info, out, null);
     assertEquals(ResultCode.SUCCESS, exitCode);
 
     openStatus = data.isOpen() ? "OPEN" : "CLOSED";
-    expected = String.format(formatStr, cname, openStatus,
+    expected = String.format(formatStr, container.getContainerID(), openStatus,
         data.getDBPath(), data.getContainerPath(), "",
         datanodeDetails.getHostName(), datanodeDetails.getHostName());
     assertEquals(expected, out.toString());
@@ -326,7 +321,8 @@ public class TestSCMCli {
     containerOperationClient.closeContainer(
         container.getContainerID(), container.getPipeline());
 
-    info = new String[] {"-container", "-info", "-c", cname};
+    info = new String[] { "-container", "-info", "-c",
+        Long.toString(container.getContainerID()) };
     exitCode = runCommandAndGetOutput(info, out, null);
     assertEquals(ResultCode.SUCCESS, exitCode);
     data = ContainerData
@@ -334,9 +330,10 @@ public class TestSCMCli {
             container.getContainerID(), container.getPipeline()), conf);
 
     openStatus = data.isOpen() ? "OPEN" : "CLOSED";
-    expected = String.format(formatStrWithHash, cname, openStatus,
-        data.getHash(), data.getDBPath(), data.getContainerPath(),
-        "", datanodeDetails.getHostName(), datanodeDetails.getHostName());
+    expected = String
+        .format(formatStrWithHash, container.getContainerID(), openStatus,
+            data.getHash(), data.getDBPath(), data.getContainerPath(), "",
+            datanodeDetails.getHostName(), datanodeDetails.getHostName());
     assertEquals(expected, out.toString());
   }
 
@@ -413,16 +410,16 @@ public class TestSCMCli {
     args = new String[] {"-container", "-list", "-start",
         startContainerIDStr};
     exitCode = runCommandAndGetOutput(args, out, err);
-    assertEquals(ResultCode.SUCCESS, exitCode);
-    assertTrue(out.toString().isEmpty());
+    assertEquals(ResultCode.EXECUTION_ERROR, exitCode);
+    assertTrue(err.toString().contains(
+        "java.io.IOException: Expecting container count"));
   }
 
   @Test
   public void testCloseContainer() throws Exception {
-    long containerID = ContainerTestHelper.getTestContainerID();
-    String[] args = {"-container", "-create", "-c",
-        Long.toString(containerID)};
-    assertEquals(ResultCode.SUCCESS, cli.run(args));
+    long containerID = containerOperationClient
+        .createContainer(xceiverClientManager.getType(),
+            HddsProtos.ReplicationFactor.ONE, containerOwner).getContainerID();
     ContainerInfo container = scm.getClientProtocolServer()
         .getContainer(containerID);
     assertNotNull(container);
@@ -477,7 +474,7 @@ public class TestSCMCli {
     String[] args2 = {"-container", "-create", "-help"};
     assertEquals(ResultCode.SUCCESS, cli.run(args2));
     String expected2 =
-        "usage: hdfs scm -container -create\n";
+        "usage: hdfs scm -container -create\n\n";
     assertEquals(expected2, testContent.toString());
     testContent.reset();
 
@@ -502,11 +499,10 @@ public class TestSCMCli {
 
     String[] args5 = {"-container", "-list", "-help"};
     assertEquals(ResultCode.SUCCESS, cli.run(args5));
-    String expected5 =
-        "usage: hdfs scm -container -list <option>\n" +
-            "where <option> can be the following\n" +
-            " -start <arg>    Specify start container id, required\n" +
-            " -count <arg>   Specify count number name\n";
+    String expected5 = "usage: hdfs scm -container -list <option>\n"
+        + "where <option> can be the following\n"
+        + " -count <arg>   Specify count number, required\n"
+        + " -start <arg>   Specify start container id\n";
     assertEquals(expected5, testContent.toString());
     testContent.reset();
 

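A test-side pattern recurring in the TestSCMCli changes above: the CLI writes to System.out, so the tests install a buffer-backed PrintStream and assert on the captured text. A minimal sketch of the pattern follows; the try/finally restore is an extra safety step not shown in these hunks.

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    public class CaptureStdoutDemo {
      public static void main(String[] args) {
        ByteArrayOutputStream captured = new ByteArrayOutputStream();
        PrintStream original = System.out;
        System.setOut(new PrintStream(captured));
        try {
          // Stand-in for the command under test, e.g.
          // cli.run(new String[] {"-container", "-create"});
          System.out.print("hello");
        } finally {
          System.setOut(original);  // restore so later output is visible
        }
        System.out.println("captured: " + captured.toString());
      }
    }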



[37/50] [abbrv] hadoop git commit: Revert "Add 2.9.1 release notes and changes documents"

Posted by xy...@apache.org.
Revert "Add 2.9.1 release notes and changes documents"

This reverts commit e4dc346d651de4c9af05a9616f8fe6369895d8af.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/187a00f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/187a00f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/187a00f8

Branch: refs/heads/HDDS-4
Commit: 187a00f87ace2ad074cb9827879ecab5cc172ce8
Parents: e933c1c
Author: sammichen <sa...@intel.com>
Authored: Mon May 14 14:38:40 2018 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../markdown/release/2.9.1/CHANGES.2.9.1.md     | 277 ----------------
 .../release/2.9.1/RELEASENOTES.2.9.1.md         |  88 ------
 .../jdiff/Apache_Hadoop_HDFS_2.9.1.xml          | 312 -------------------
 hadoop-project-dist/pom.xml                     |   2 +-
 4 files changed, 1 insertion(+), 678 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/187a00f8/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
deleted file mode 100644
index c5e53f6..0000000
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
+++ /dev/null
@@ -1,277 +0,0 @@
-
-<!---
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
--->
-# "Apache Hadoop" Changelog
-
-## Release 2.9.1 - 2018-04-16
-
-### INCOMPATIBLE CHANGES:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | RBF: Document Router and State Store metrics |  Major | documentation | Yiqun Lin | Yiqun Lin |
-| [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | RBF: Add ACL support for mount table |  Major | . | Yiqun Lin | Yiqun Lin |
-| [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath |  Major | timelineclient, timelinereader, timelineserver | Vrushali C | Varun Saxena |
-| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store |  Minor | documentation | Yiqun Lin | Yiqun Lin |
-
-
-### IMPORTANT ISSUES:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix doc error setting up client |  Major | federation | tartarus | tartarus |
-
-
-### NEW FEATURES:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HADOOP-12756](https://issues.apache.org/jira/browse/HADOOP-12756) | Incorporate Aliyun OSS file system implementation |  Major | fs, fs/oss | shimingfei | mingfei.shi |
-
-
-### IMPROVEMENTS:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HADOOP-14872](https://issues.apache.org/jira/browse/HADOOP-14872) | CryptoInputStream should implement unbuffer |  Major | fs, security | John Zhuge | John Zhuge |
-| [HADOOP-14964](https://issues.apache.org/jira/browse/HADOOP-14964) | AliyunOSS: backport Aliyun OSS module to branch-2 |  Major | fs/oss | Genmao Yu | SammiChen |
-| [YARN-6851](https://issues.apache.org/jira/browse/YARN-6851) | Capacity Scheduler: document configs for controlling # containers allowed to be allocated per node heartbeat |  Minor | . | Wei Yan | Wei Yan |
-| [YARN-7495](https://issues.apache.org/jira/browse/YARN-7495) | Improve robustness of the AggregatedLogDeletionService |  Major | log-aggregation | Jonathan Eagles | Jonathan Eagles |
-| [YARN-7611](https://issues.apache.org/jira/browse/YARN-7611) | Node manager web UI should display container type in containers page |  Major | nodemanager, webapp | Weiwei Yang | Weiwei Yang |
-| [HADOOP-15056](https://issues.apache.org/jira/browse/HADOOP-15056) | Fix TestUnbuffer#testUnbufferException failure |  Minor | test | Jack Bearden | Jack Bearden |
-| [HADOOP-15012](https://issues.apache.org/jira/browse/HADOOP-15012) | Add readahead, dropbehind, and unbuffer to StreamCapabilities |  Major | fs | John Zhuge | John Zhuge |
-| [HADOOP-15104](https://issues.apache.org/jira/browse/HADOOP-15104) | AliyunOSS: change the default value of max error retry |  Major | fs/oss | wujinhu | wujinhu |
-| [YARN-7642](https://issues.apache.org/jira/browse/YARN-7642) | Add test case to verify context update after container promotion or demotion with or without auto update |  Minor | nodemanager | Weiwei Yang | Weiwei Yang |
-| [HADOOP-15111](https://issues.apache.org/jira/browse/HADOOP-15111) | AliyunOSS: backport HADOOP-14993 to branch-2 |  Major | fs/oss | Genmao Yu | Genmao Yu |
-| [HDFS-9023](https://issues.apache.org/jira/browse/HDFS-9023) | When NN is not able to identify DN for replication, reason behind it can be logged |  Critical | hdfs-client, namenode | Surendra Singh Lilhore | Xiao Chen |
-| [YARN-7678](https://issues.apache.org/jira/browse/YARN-7678) | Ability to enable logging of container memory stats |  Major | nodemanager | Jim Brennan | Jim Brennan |
-| [HDFS-12945](https://issues.apache.org/jira/browse/HDFS-12945) | Switch to ClientProtocol instead of NamenodeProtocols in NamenodeWebHdfsMethods |  Minor | . | Wei Yan | Wei Yan |
-| [YARN-7590](https://issues.apache.org/jira/browse/YARN-7590) | Improve container-executor validation check |  Major | security, yarn | Eric Yang | Eric Yang |
-| [HADOOP-15189](https://issues.apache.org/jira/browse/HADOOP-15189) | backport HADOOP-15039 to branch-2 and branch-3 |  Blocker | . | Genmao Yu | Genmao Yu |
-| [HADOOP-15212](https://issues.apache.org/jira/browse/HADOOP-15212) | Add independent secret manager method for logging expired tokens |  Major | security | Daryn Sharp | Daryn Sharp |
-| [YARN-7728](https://issues.apache.org/jira/browse/YARN-7728) | Expose container preemptions related information in Capacity Scheduler queue metrics |  Major | . | Eric Payne | Eric Payne |
-| [MAPREDUCE-7048](https://issues.apache.org/jira/browse/MAPREDUCE-7048) | Uber AM can crash due to unknown task in statusUpdate |  Major | mr-am | Peter Bacsko | Peter Bacsko |
-| [HADOOP-13972](https://issues.apache.org/jira/browse/HADOOP-13972) | ADLS to support per-store configuration |  Major | fs/adl | John Zhuge | Sharad Sonker |
-| [YARN-7813](https://issues.apache.org/jira/browse/YARN-7813) | Capacity Scheduler Intra-queue Preemption should be configurable for each queue |  Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
-| [HDFS-11187](https://issues.apache.org/jira/browse/HDFS-11187) | Optimize disk access for last partial chunk checksum of Finalized replica |  Major | datanode | Wei-Chiu Chuang | Gabor Bota |
-| [HADOOP-15279](https://issues.apache.org/jira/browse/HADOOP-15279) | increase maven heap size recommendations |  Minor | build, documentation, test | Allen Wittenauer | Allen Wittenauer |
-| [HDFS-12884](https://issues.apache.org/jira/browse/HDFS-12884) | BlockUnderConstructionFeature.truncateBlock should be of type BlockInfo |  Major | namenode | Konstantin Shvachko | chencan |
-| [HADOOP-15334](https://issues.apache.org/jira/browse/HADOOP-15334) | Upgrade Maven surefire plugin |  Major | build | Arpit Agarwal | Arpit Agarwal |
-| [YARN-7623](https://issues.apache.org/jira/browse/YARN-7623) | Fix the CapacityScheduler Queue configuration documentation |  Major | . | Arun Suresh | Jonathan Hung |
-| [HDFS-13314](https://issues.apache.org/jira/browse/HDFS-13314) | NameNode should optionally exit if it detects FsImage corruption |  Major | namenode | Arpit Agarwal | Arpit Agarwal |
-
-
-### BUG FIXES:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HADOOP-13723](https://issues.apache.org/jira/browse/HADOOP-13723) | AliyunOSSInputStream#read() should update read bytes stat correctly |  Major | tools | Mingliang Liu | Mingliang Liu |
-| [HADOOP-14045](https://issues.apache.org/jira/browse/HADOOP-14045) | Aliyun OSS documentation missing from website |  Major | documentation, fs/oss | Andrew Wang | Yiqun Lin |
-| [HADOOP-14458](https://issues.apache.org/jira/browse/HADOOP-14458) | Add missing imports to TestAliyunOSSFileSystemContract.java |  Trivial | fs/oss, test | Mingliang Liu | Mingliang Liu |
-| [HADOOP-14466](https://issues.apache.org/jira/browse/HADOOP-14466) | Remove useless document from TestAliyunOSSFileSystemContract.java |  Minor | documentation | Akira Ajisaka | Chen Liang |
-| [HDFS-12318](https://issues.apache.org/jira/browse/HDFS-12318) | Fix IOException condition for openInfo in DFSInputStream |  Major | . | legend | legend |
-| [HDFS-12614](https://issues.apache.org/jira/browse/HDFS-12614) | FSPermissionChecker#getINodeAttrs() throws NPE when INodeAttributesProvider configured |  Major | . | Manoj Govindassamy | Manoj Govindassamy |
-| [HDFS-12788](https://issues.apache.org/jira/browse/HDFS-12788) | Reset the upload button when file upload fails |  Critical | ui, webhdfs | Brahma Reddy Battula | Brahma Reddy Battula |
-| [YARN-7388](https://issues.apache.org/jira/browse/YARN-7388) | TestAMRestart should be scheduler agnostic |  Major | . | Haibo Chen | Haibo Chen |
-| [HDFS-12705](https://issues.apache.org/jira/browse/HDFS-12705) | WebHdfsFileSystem exceptions should retain the caused by exception |  Major | hdfs | Daryn Sharp | Hanisha Koneru |
-| [YARN-7361](https://issues.apache.org/jira/browse/YARN-7361) | Improve the docker container runtime documentation |  Major | . | Shane Kumpf | Shane Kumpf |
-| [YARN-7469](https://issues.apache.org/jira/browse/YARN-7469) | Capacity Scheduler Intra-queue preemption: User can starve if newest app is exactly at user limit |  Major | capacity scheduler, yarn | Eric Payne | Eric Payne |
-| [YARN-7489](https://issues.apache.org/jira/browse/YARN-7489) | ConcurrentModificationException in RMAppImpl#getRMAppMetrics |  Major | capacityscheduler | Tao Yang | Tao Yang |
-| [YARN-7525](https://issues.apache.org/jira/browse/YARN-7525) | Incorrect query parameters in cluster nodes REST API document |  Minor | documentation | Tao Yang | Tao Yang |
-| [HADOOP-15045](https://issues.apache.org/jira/browse/HADOOP-15045) | ISA-L build options are documented in branch-2 |  Major | build, documentation | Akira Ajisaka | Akira Ajisaka |
-| [YARN-7390](https://issues.apache.org/jira/browse/YARN-7390) | All reservation related test cases failed when TestYarnClient runs against Fair Scheduler. |  Major | fairscheduler, reservation system | Yufei Gu | Yufei Gu |
-| [HDFS-12754](https://issues.apache.org/jira/browse/HDFS-12754) | Lease renewal can hit a deadlock |  Major | . | Kuhu Shukla | Kuhu Shukla |
-| [HDFS-12832](https://issues.apache.org/jira/browse/HDFS-12832) | INode.getFullPathName may throw ArrayIndexOutOfBoundsException lead to NameNode exit |  Critical | namenode | DENG FEI | Konstantin Shvachko |
-| [HDFS-11754](https://issues.apache.org/jira/browse/HDFS-11754) | Make FsServerDefaults cache configurable. |  Minor | . | Rushabh S Shah | Mikhail Erofeev |
-| [YARN-7509](https://issues.apache.org/jira/browse/YARN-7509) | AsyncScheduleThread and ResourceCommitterService are still running after RM is transitioned to standby |  Critical | . | Tao Yang | Tao Yang |
-| [YARN-7558](https://issues.apache.org/jira/browse/YARN-7558) | "yarn logs" command fails to get logs for running containers if UI authentication is enabled. |  Critical | . | Namit Maheshwari | Xuan Gong |
-| [HDFS-12638](https://issues.apache.org/jira/browse/HDFS-12638) | Delete copy-on-truncate block along with the original block, when deleting a file being truncated |  Blocker | hdfs | Jiandan Yang | Konstantin Shvachko |
-| [MAPREDUCE-5124](https://issues.apache.org/jira/browse/MAPREDUCE-5124) | AM lacks flow control for task events |  Major | mr-am | Jason Lowe | Peter Bacsko |
-| [YARN-7455](https://issues.apache.org/jira/browse/YARN-7455) | quote\_and\_append\_arg can overflow buffer |  Major | nodemanager | Jason Lowe | Jim Brennan |
-| [HADOOP-14985](https://issues.apache.org/jira/browse/HADOOP-14985) | Remove subversion related code from VersionInfoMojo.java |  Minor | build | Akira Ajisaka | Ajay Kumar |
-| [HDFS-12889](https://issues.apache.org/jira/browse/HDFS-12889) | Router UI is missing robots.txt file |  Major | . | Bharat Viswanadham | Bharat Viswanadham |
-| [HDFS-11576](https://issues.apache.org/jira/browse/HDFS-11576) | Block recovery will fail indefinitely if recovery time \> heartbeat interval |  Critical | datanode, hdfs, namenode | Lukas Majercak | Lukas Majercak |
-| [YARN-7607](https://issues.apache.org/jira/browse/YARN-7607) | Remove the trailing duplicated timestamp in container diagnostics message |  Minor | nodemanager | Weiwei Yang | Weiwei Yang |
-| [HADOOP-15080](https://issues.apache.org/jira/browse/HADOOP-15080) | Aliyun OSS: update oss sdk from 2.8.1 to 2.8.3 to remove its dependency on Cat-x "json-lib" |  Blocker | fs/oss | Chris Douglas | SammiChen |
-| [YARN-7591](https://issues.apache.org/jira/browse/YARN-7591) | NPE in async-scheduling mode of CapacityScheduler |  Critical | capacityscheduler | Tao Yang | Tao Yang |
-| [YARN-7608](https://issues.apache.org/jira/browse/YARN-7608) | Incorrect sTarget column causing DataTable warning on RM application and scheduler web page |  Major | resourcemanager, webapp | Weiwei Yang | Gergely Novák |
-| [HDFS-12833](https://issues.apache.org/jira/browse/HDFS-12833) | Distcp : Update the usage of delete option for dependency with update and overwrite option |  Minor | distcp, hdfs | Harshakiran Reddy | usharani |
-| [YARN-7647](https://issues.apache.org/jira/browse/YARN-7647) | NM print inappropriate error log when node-labels is enabled |  Minor | . | Yang Wang | Yang Wang |
-| [HDFS-12907](https://issues.apache.org/jira/browse/HDFS-12907) | Allow read-only access to reserved raw for non-superusers |  Major | namenode | Daryn Sharp | Rushabh S Shah |
-| [HDFS-12881](https://issues.apache.org/jira/browse/HDFS-12881) | Output streams closed with IOUtils suppressing write errors |  Major | . | Jason Lowe | Ajay Kumar |
-| [YARN-7595](https://issues.apache.org/jira/browse/YARN-7595) | Container launching code suppresses close exceptions after writes |  Major | nodemanager | Jason Lowe | Jim Brennan |
-| [HADOOP-15085](https://issues.apache.org/jira/browse/HADOOP-15085) | Output streams closed with IOUtils suppressing write errors |  Major | . | Jason Lowe | Jim Brennan |
-| [YARN-7661](https://issues.apache.org/jira/browse/YARN-7661) | NodeManager metrics return wrong value after update node resource |  Major | . | Yang Wang | Yang Wang |
-| [HDFS-12347](https://issues.apache.org/jira/browse/HDFS-12347) | TestBalancerRPCDelay#testBalancerRPCDelay fails very frequently |  Critical | test | Xiao Chen | Bharat Viswanadham |
-| [YARN-7542](https://issues.apache.org/jira/browse/YARN-7542) | Fix issue that causes some Running Opportunistic Containers to be recovered as PAUSED |  Major | . | Arun Suresh | Sampada Dehankar |
-| [HADOOP-15143](https://issues.apache.org/jira/browse/HADOOP-15143) | NPE due to Invalid KerberosTicket in UGI |  Major | . | Jitendra Nath Pandey | Mukul Kumar Singh |
-| [YARN-7692](https://issues.apache.org/jira/browse/YARN-7692) | Skip validating priority acls while recovering applications |  Blocker | resourcemanager | Charan Hebri | Sunil G |
-| [MAPREDUCE-7028](https://issues.apache.org/jira/browse/MAPREDUCE-7028) | Concurrent task progress updates causing NPE in Application Master |  Blocker | mr-am | Gergo Repas | Gergo Repas |
-| [YARN-7619](https://issues.apache.org/jira/browse/YARN-7619) | Max AM Resource value in Capacity Scheduler UI has to be refreshed for every user |  Major | capacity scheduler, yarn | Eric Payne | Eric Payne |
-| [YARN-7699](https://issues.apache.org/jira/browse/YARN-7699) | queueUsagePercentage is coming as INF for getApp REST api call |  Major | webapp | Sunil G | Sunil G |
-| [YARN-7508](https://issues.apache.org/jira/browse/YARN-7508) | NPE in FiCaSchedulerApp when debug log enabled in async-scheduling mode |  Major | capacityscheduler | Tao Yang | Tao Yang |
-| [YARN-7663](https://issues.apache.org/jira/browse/YARN-7663) | RMAppImpl:Invalid event: START at KILLED |  Minor | resourcemanager | lujie | lujie |
-| [YARN-6948](https://issues.apache.org/jira/browse/YARN-6948) | Invalid event: ATTEMPT\_ADDED at FINAL\_SAVING |  Minor | yarn | lujie | lujie |
-| [YARN-7735](https://issues.apache.org/jira/browse/YARN-7735) | Fix typo in YARN documentation |  Minor | documentation | Takanobu Asanuma | Takanobu Asanuma |
-| [YARN-7727](https://issues.apache.org/jira/browse/YARN-7727) | Incorrect log levels in few logs with QueuePriorityContainerCandidateSelector |  Minor | yarn | Prabhu Joseph | Prabhu Joseph |
-| [HDFS-11915](https://issues.apache.org/jira/browse/HDFS-11915) | Sync rbw dir on the first hsync() to avoid file lost on power failure |  Critical | . | Kanaka Kumar Avvaru | Vinayakumar B |
-| [HDFS-9049](https://issues.apache.org/jira/browse/HDFS-9049) | Make Datanode Netty reverse proxy port to be configurable |  Major | datanode | Vinayakumar B | Vinayakumar B |
-| [HADOOP-15150](https://issues.apache.org/jira/browse/HADOOP-15150) | in FsShell, UGI params should be overidden through env vars(-D arg) |  Major | . | Brahma Reddy Battula | Brahma Reddy Battula |
-| [HADOOP-15181](https://issues.apache.org/jira/browse/HADOOP-15181) | Typo in SecureMode.md |  Trivial | documentation | Masahiro Tanaka | Masahiro Tanaka |
-| [YARN-7737](https://issues.apache.org/jira/browse/YARN-7737) | prelaunch.err file not found exception on container failure |  Major | . | Jonathan Hung | Keqiu Hu |
-| [HDFS-13063](https://issues.apache.org/jira/browse/HDFS-13063) | Fix the incorrect spelling in HDFSHighAvailabilityWithQJM.md |  Trivial | documentation | Jianfei Jiang | Jianfei Jiang |
-| [YARN-7102](https://issues.apache.org/jira/browse/YARN-7102) | NM heartbeat stuck when responseId overflows MAX\_INT |  Critical | . | Botong Huang | Botong Huang |
-| [HADOOP-15151](https://issues.apache.org/jira/browse/HADOOP-15151) | MapFile.fix creates a wrong index file in case of block-compressed data file. |  Major | common | Grigori Rybkine | Grigori Rybkine |
-| [MAPREDUCE-7020](https://issues.apache.org/jira/browse/MAPREDUCE-7020) | Task timeout in uber mode can crash AM |  Major | mr-am | Akira Ajisaka | Peter Bacsko |
-| [YARN-7698](https://issues.apache.org/jira/browse/YARN-7698) | A misleading variable's name in ApplicationAttemptEventDispatcher |  Minor | resourcemanager | Jinjiang Ling | Jinjiang Ling |
-| [HDFS-13100](https://issues.apache.org/jira/browse/HDFS-13100) | Handle IllegalArgumentException when GETSERVERDEFAULTS is not implemented in webhdfs. |  Critical | hdfs, webhdfs | Yongjun Zhang | Yongjun Zhang |
-| [YARN-6868](https://issues.apache.org/jira/browse/YARN-6868) | Add test scope to certain entries in hadoop-yarn-server-resourcemanager pom.xml |  Major | yarn | Ray Chiang | Ray Chiang |
-| [YARN-7849](https://issues.apache.org/jira/browse/YARN-7849) | TestMiniYarnClusterNodeUtilization#testUpdateNodeUtilization fails due to heartbeat sync error |  Major | test | Jason Lowe | Botong Huang |
-| [YARN-7801](https://issues.apache.org/jira/browse/YARN-7801) | AmFilterInitializer should addFilter after fill all parameters |  Critical | . | Sumana Sathish | Wangda Tan |
-| [YARN-7890](https://issues.apache.org/jira/browse/YARN-7890) | NPE during container relaunch |  Major | . | Billie Rinaldi | Jason Lowe |
-| [HDFS-12935](https://issues.apache.org/jira/browse/HDFS-12935) | Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up |  Major | tools | Jianfei Jiang | Jianfei Jiang |
-| [HDFS-13120](https://issues.apache.org/jira/browse/HDFS-13120) | Snapshot diff could be corrupted after concat |  Major | namenode, snapshots | Xiaoyu Yao | Xiaoyu Yao |
-| [HDFS-10453](https://issues.apache.org/jira/browse/HDFS-10453) | ReplicationMonitor thread could stuck for long time due to the race between replication and delete of same file in a large cluster. |  Major | namenode | He Xiaoqiao | He Xiaoqiao |
-| [HDFS-8693](https://issues.apache.org/jira/browse/HDFS-8693) | refreshNamenodes does not support adding a new standby to a running DN |  Critical | datanode, ha | Jian Fang | Ajith S |
-| [MAPREDUCE-7052](https://issues.apache.org/jira/browse/MAPREDUCE-7052) | TestFixedLengthInputFormat#testFormatCompressedIn is flaky |  Major | client, test | Peter Bacsko | Peter Bacsko |
-| [HDFS-13112](https://issues.apache.org/jira/browse/HDFS-13112) | Token expiration edits may cause log corruption or deadlock |  Critical | namenode | Daryn Sharp | Daryn Sharp |
-| [MAPREDUCE-7053](https://issues.apache.org/jira/browse/MAPREDUCE-7053) | Timed out tasks can fail to produce thread dump |  Major | . | Jason Lowe | Jason Lowe |
-| [HADOOP-15206](https://issues.apache.org/jira/browse/HADOOP-15206) | BZip2 drops and duplicates records when input split size is small |  Major | . | Aki Tanaka | Aki Tanaka |
-| [YARN-7947](https://issues.apache.org/jira/browse/YARN-7947) | Capacity Scheduler intra-queue preemption can NPE for non-schedulable apps |  Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
-| [YARN-7945](https://issues.apache.org/jira/browse/YARN-7945) | Java Doc error in UnmanagedAMPoolManager for branch-2 |  Major | . | Rohith Sharma K S | Botong Huang |
-| [HADOOP-14903](https://issues.apache.org/jira/browse/HADOOP-14903) | Add json-smart explicitly to pom.xml |  Major | common | Ray Chiang | Ray Chiang |
-| [HDFS-12781](https://issues.apache.org/jira/browse/HDFS-12781) | After Datanode down, In Namenode UI Datanode tab is throwing warning message. |  Major | datanode | Harshakiran Reddy | Brahma Reddy Battula |
-| [HDFS-12070](https://issues.apache.org/jira/browse/HDFS-12070) | Failed block recovery leaves files open indefinitely and at risk for data loss |  Major | . | Daryn Sharp | Kihwal Lee |
-| [HADOOP-15251](https://issues.apache.org/jira/browse/HADOOP-15251) | Backport HADOOP-13514 (surefire upgrade) to branch-2 |  Major | test | Chris Douglas | Chris Douglas |
-| [HADOOP-15275](https://issues.apache.org/jira/browse/HADOOP-15275) | Incorrect javadoc for return type of RetryPolicy#shouldRetry |  Minor | documentation | Nanda kumar | Nanda kumar |
-| [YARN-7511](https://issues.apache.org/jira/browse/YARN-7511) | NPE in ContainerLocalizer when localization failed for running container |  Major | nodemanager | Tao Yang | Tao Yang |
-| [MAPREDUCE-7023](https://issues.apache.org/jira/browse/MAPREDUCE-7023) | TestHadoopArchiveLogs.testCheckFilesAndSeedApps fails on rerun |  Minor | test | Gergely Novák | Gergely Novák |
-| [HADOOP-15283](https://issues.apache.org/jira/browse/HADOOP-15283) | Upgrade from findbugs 3.0.1 to spotbugs 3.1.2 in branch-2 to fix docker image build |  Major | . | Xiao Chen | Akira Ajisaka |
-| [YARN-7736](https://issues.apache.org/jira/browse/YARN-7736) | Fix itemization in YARN federation document |  Minor | documentation | Akira Ajisaka | Sen Zhao |
-| [HDFS-13164](https://issues.apache.org/jira/browse/HDFS-13164) | File not closed if streamer fail with DSQuotaExceededException |  Major | hdfs-client | Xiao Chen | Xiao Chen |
-| [HDFS-13109](https://issues.apache.org/jira/browse/HDFS-13109) | Support fully qualified hdfs path in EZ commands |  Major | hdfs | Hanisha Koneru | Hanisha Koneru |
-| [MAPREDUCE-6930](https://issues.apache.org/jira/browse/MAPREDUCE-6930) | mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml |  Major | mrv2 | Daniel Templeton | Sen Zhao |
-| [HDFS-12156](https://issues.apache.org/jira/browse/HDFS-12156) | TestFSImage fails without -Pnative |  Major | test | Akira Ajisaka | Akira Ajisaka |
-| [HADOOP-15308](https://issues.apache.org/jira/browse/HADOOP-15308) | TestConfiguration fails on Windows because of paths |  Major | . | Íñigo Goiri | Xiao Liang |
-| [YARN-7636](https://issues.apache.org/jira/browse/YARN-7636) | Re-reservation count may overflow when cluster resource exhausted for a long time |  Major | capacityscheduler | Tao Yang | Tao Yang |
-| [HDFS-12886](https://issues.apache.org/jira/browse/HDFS-12886) | Ignore minReplication for block recovery |  Major | hdfs, namenode | Lukas Majercak | Lukas Majercak |
-| [HDFS-13296](https://issues.apache.org/jira/browse/HDFS-13296) | GenericTestUtils generates paths with drive letter in Windows and fail webhdfs related test cases |  Major | . | Xiao Liang | Xiao Liang |
-| [HDFS-13268](https://issues.apache.org/jira/browse/HDFS-13268) | TestWebHdfsFileContextMainOperations fails on Windows |  Major | . | Íñigo Goiri | Xiao Liang |
-| [YARN-8054](https://issues.apache.org/jira/browse/YARN-8054) | Improve robustness of the LocalDirsHandlerService MonitoringTimerTask thread |  Major | . | Jonathan Eagles | Jonathan Eagles |
-| [YARN-7873](https://issues.apache.org/jira/browse/YARN-7873) | Revert YARN-6078 |  Blocker | . | Billie Rinaldi | Billie Rinaldi |
-| [HDFS-13195](https://issues.apache.org/jira/browse/HDFS-13195) | DataNode conf page  cannot display the current value after reconfig |  Minor | datanode | maobaolong | maobaolong |
-| [HADOOP-15320](https://issues.apache.org/jira/browse/HADOOP-15320) | Remove customized getFileBlockLocations for hadoop-azure and hadoop-azure-datalake |  Major | fs/adl, fs/azure | shanyu zhao | shanyu zhao |
-| [HADOOP-12862](https://issues.apache.org/jira/browse/HADOOP-12862) | LDAP Group Mapping over SSL can not specify trust store |  Major | . | Wei-Chiu Chuang | Wei-Chiu Chuang |
-| [HDFS-13427](https://issues.apache.org/jira/browse/HDFS-13427) | Fix the section titles of transparent encryption document |  Minor | documentation | Akira Ajisaka | Akira Ajisaka |
-
-
-### TESTS:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HADOOP-14696](https://issues.apache.org/jira/browse/HADOOP-14696) | parallel tests don't work for Windows |  Minor | test | Allen Wittenauer | Allen Wittenauer |
-
-
-### SUB-TASKS:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HADOOP-13481](https://issues.apache.org/jira/browse/HADOOP-13481) | User end documents for Aliyun OSS FileSystem |  Minor | fs, fs/oss | Genmao Yu | Genmao Yu |
-| [HADOOP-13591](https://issues.apache.org/jira/browse/HADOOP-13591) | Unit test failure in TestOSSContractGetFileStatus and TestOSSContractRootDir |  Major | fs, fs/oss | Genmao Yu | Genmao Yu |
-| [HADOOP-13624](https://issues.apache.org/jira/browse/HADOOP-13624) | Rename TestAliyunOSSContractDispCp |  Major | fs, fs/oss | Kai Zheng | Genmao Yu |
-| [HADOOP-14065](https://issues.apache.org/jira/browse/HADOOP-14065) | AliyunOSS: oss directory filestatus should use meta time |  Major | fs/oss | Fei Hui | Fei Hui |
-| [HADOOP-13768](https://issues.apache.org/jira/browse/HADOOP-13768) | AliyunOSS: handle the failure in the batch delete operation `deleteDirs`. |  Major | fs | Genmao Yu | Genmao Yu |
-| [HADOOP-14069](https://issues.apache.org/jira/browse/HADOOP-14069) | AliyunOSS: listStatus returns wrong file info |  Major | fs/oss | Fei Hui | Fei Hui |
-| [HADOOP-13769](https://issues.apache.org/jira/browse/HADOOP-13769) | AliyunOSS: update oss sdk version |  Major | fs, fs/oss | Genmao Yu | Genmao Yu |
-| [HADOOP-14072](https://issues.apache.org/jira/browse/HADOOP-14072) | AliyunOSS: Failed to read from stream when seek beyond the download size |  Major | fs/oss | Genmao Yu | Genmao Yu |
-| [HADOOP-14192](https://issues.apache.org/jira/browse/HADOOP-14192) | Aliyun OSS FileSystem contract test should implement getTestBaseDir() |  Major | fs/oss | Mingliang Liu | Mingliang Liu |
-| [HADOOP-14194](https://issues.apache.org/jira/browse/HADOOP-14194) | Aliyun OSS should not use empty endpoint as default |  Major | fs/oss | Mingliang Liu | Genmao Yu |
-| [HADOOP-14787](https://issues.apache.org/jira/browse/HADOOP-14787) | AliyunOSS: Implement the `createNonRecursive` operator |  Major | fs, fs/oss | Genmao Yu | Genmao Yu |
-| [HADOOP-14649](https://issues.apache.org/jira/browse/HADOOP-14649) | Update aliyun-sdk-oss version to 2.8.1 |  Major | fs/oss | Ray Chiang | Genmao Yu |
-| [HADOOP-14799](https://issues.apache.org/jira/browse/HADOOP-14799) | Update nimbus-jose-jwt to 4.41.1 |  Major | . | Ray Chiang | Ray Chiang |
-| [HADOOP-14997](https://issues.apache.org/jira/browse/HADOOP-14997) |  Add hadoop-aliyun as dependency of hadoop-cloud-storage |  Minor | fs/oss | Genmao Yu | Genmao Yu |
-| [HDFS-12801](https://issues.apache.org/jira/browse/HDFS-12801) | RBF: Set MountTableResolver as default file resolver |  Minor | . | Íñigo Goiri | Íñigo Goiri |
-| [YARN-7430](https://issues.apache.org/jira/browse/YARN-7430) | Enable user re-mapping for Docker containers by default |  Blocker | security, yarn | Eric Yang | Eric Yang |
-| [YARN-6128](https://issues.apache.org/jira/browse/YARN-6128) | Add support for AMRMProxy HA |  Major | amrmproxy, nodemanager | Subru Krishnan | Botong Huang |
-| [HADOOP-15024](https://issues.apache.org/jira/browse/HADOOP-15024) | AliyunOSS: support user agent configuration and include that & Hadoop version information to oss server |  Major | fs, fs/oss | SammiChen | SammiChen |
-| [HDFS-12858](https://issues.apache.org/jira/browse/HDFS-12858) | RBF: Add router admin commands usage in HDFS commands reference doc |  Minor | documentation | Yiqun Lin | Yiqun Lin |
-| [HDFS-12835](https://issues.apache.org/jira/browse/HDFS-12835) | RBF: Fix Javadoc parameter errors |  Minor | . | Wei Yan | Wei Yan |
-| [YARN-7587](https://issues.apache.org/jira/browse/YARN-7587) | Skip dispatching opportunistic containers to nodes whose queue is already full |  Major | . | Weiwei Yang | Weiwei Yang |
-| [HDFS-12396](https://issues.apache.org/jira/browse/HDFS-12396) | Webhdfs file system should get delegation token from kms provider. |  Major | encryption, kms, webhdfs | Rushabh S Shah | Rushabh S Shah |
-| [YARN-6704](https://issues.apache.org/jira/browse/YARN-6704) | Add support for work preserving NM restart when FederationInterceptor is enabled in AMRMProxyService |  Major | . | Botong Huang | Botong Huang |
-| [HDFS-12875](https://issues.apache.org/jira/browse/HDFS-12875) | RBF: Complete logic for -readonly option of dfsrouteradmin add command |  Major | . | Yiqun Lin | Íñigo Goiri |
-| [YARN-7630](https://issues.apache.org/jira/browse/YARN-7630) | Fix AMRMToken rollover handling in AMRMProxy |  Minor | . | Botong Huang | Botong Huang |
-| [HDFS-12937](https://issues.apache.org/jira/browse/HDFS-12937) | RBF: Add more unit tests for router admin commands |  Major | test | Yiqun Lin | Yiqun Lin |
-| [HDFS-12988](https://issues.apache.org/jira/browse/HDFS-12988) | RBF: Mount table entries not properly updated in the local cache |  Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HADOOP-15156](https://issues.apache.org/jira/browse/HADOOP-15156) | backport HADOOP-15086 rename fix to branch-2 |  Major | fs/azure | Thomas Marquardt | Thomas Marquardt |
-| [YARN-7716](https://issues.apache.org/jira/browse/YARN-7716) | metricsTimeStart and metricsTimeEnd should be all lower case in the doc |  Major | timelinereader | Haibo Chen | Haibo Chen |
-| [HDFS-12802](https://issues.apache.org/jira/browse/HDFS-12802) | RBF: Control MountTableResolver cache size |  Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HADOOP-15027](https://issues.apache.org/jira/browse/HADOOP-15027) | AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance |  Major | fs/oss | wujinhu | wujinhu |
-| [HDFS-13028](https://issues.apache.org/jira/browse/HDFS-13028) | RBF: Fix spurious TestRouterRpc#testProxyGetStats |  Minor | . | Íñigo Goiri | Íñigo Goiri |
-| [YARN-5094](https://issues.apache.org/jira/browse/YARN-5094) | some YARN container events have timestamp of -1 |  Critical | . | Sangjin Lee | Haibo Chen |
-| [YARN-7782](https://issues.apache.org/jira/browse/YARN-7782) | Enable user re-mapping for Docker containers in yarn-default.xml |  Blocker | security, yarn | Eric Yang | Eric Yang |
-| [HDFS-12772](https://issues.apache.org/jira/browse/HDFS-12772) | RBF: Federation Router State State Store internal API |  Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-13042](https://issues.apache.org/jira/browse/HDFS-13042) | RBF: Heartbeat Router State |  Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-13049](https://issues.apache.org/jira/browse/HDFS-13049) | RBF: Inconsistent Router OPTS config in branch-2 and branch-3 |  Minor | . | Wei Yan | Wei Yan |
-| [HDFS-12574](https://issues.apache.org/jira/browse/HDFS-12574) | Add CryptoInputStream to WebHdfsFileSystem read call. |  Major | encryption, kms, webhdfs | Rushabh S Shah | Rushabh S Shah |
-| [HDFS-13044](https://issues.apache.org/jira/browse/HDFS-13044) | RBF: Add a safe mode for the Router |  Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-13043](https://issues.apache.org/jira/browse/HDFS-13043) | RBF: Expose the state of the Routers in the federation |  Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-13068](https://issues.apache.org/jira/browse/HDFS-13068) | RBF: Add router admin option to manage safe mode |  Major | . | Íñigo Goiri | Yiqun Lin |
-| [HDFS-13119](https://issues.apache.org/jira/browse/HDFS-13119) | RBF: Manage unavailable clusters |  Major | . | Íñigo Goiri | Yiqun Lin |
-| [HDFS-13187](https://issues.apache.org/jira/browse/HDFS-13187) | RBF: Fix Routers information shown in the web UI |  Minor | . | Wei Yan | Wei Yan |
-| [HDFS-13184](https://issues.apache.org/jira/browse/HDFS-13184) | RBF: Improve the unit test TestRouterRPCClientRetries |  Minor | test | Yiqun Lin | Yiqun Lin |
-| [HDFS-13199](https://issues.apache.org/jira/browse/HDFS-13199) | RBF: Fix the hdfs router page missing label icon issue |  Major | federation, hdfs | maobaolong | maobaolong |
-| [HDFS-13214](https://issues.apache.org/jira/browse/HDFS-13214) | RBF: Complete document of Router configuration |  Major | . | Tao Jie | Yiqun Lin |
-| [HDFS-13230](https://issues.apache.org/jira/browse/HDFS-13230) | RBF: ConnectionManager's cleanup task will compare each pool's own active conns with its total conns |  Minor | . | Wei Yan | Chao Sun |
-| [HDFS-13233](https://issues.apache.org/jira/browse/HDFS-13233) | RBF: MountTableResolver doesn't return the correct mount point of the given path |  Major | hdfs | wangzhiyuan | wangzhiyuan |
-| [HDFS-13212](https://issues.apache.org/jira/browse/HDFS-13212) | RBF: Fix router location cache issue |  Major | federation, hdfs | Weiwei Wu | Weiwei Wu |
-| [HDFS-13232](https://issues.apache.org/jira/browse/HDFS-13232) | RBF: ConnectionPool should return first usable connection |  Minor | . | Wei Yan | Ekanth S |
-| [HDFS-13240](https://issues.apache.org/jira/browse/HDFS-13240) | RBF: Update some inaccurate document descriptions |  Minor | . | Yiqun Lin | Yiqun Lin |
-| [HDFS-11399](https://issues.apache.org/jira/browse/HDFS-11399) | Many tests fails in Windows due to injecting disk failures |  Major | . | Yiqun Lin | Yiqun Lin |
-| [HDFS-13241](https://issues.apache.org/jira/browse/HDFS-13241) | RBF: TestRouterSafemode failed if the port 8888 is in use |  Major | hdfs, test | maobaolong | maobaolong |
-| [HDFS-13253](https://issues.apache.org/jira/browse/HDFS-13253) | RBF: Quota management incorrect parent-child relationship judgement |  Major | . | Yiqun Lin | Yiqun Lin |
-| [HDFS-13226](https://issues.apache.org/jira/browse/HDFS-13226) | RBF: Throw the exception if mount table entry validated failed |  Major | hdfs | maobaolong | maobaolong |
-| [HDFS-12773](https://issues.apache.org/jira/browse/HDFS-12773) | RBF: Improve State Store FS implementation |  Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-13198](https://issues.apache.org/jira/browse/HDFS-13198) | RBF: RouterHeartbeatService throws out CachedStateStore related exceptions when starting router |  Minor | . | Wei Yan | Wei Yan |
-| [HDFS-13224](https://issues.apache.org/jira/browse/HDFS-13224) | RBF: Resolvers to support mount points across multiple subclusters |  Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HADOOP-15262](https://issues.apache.org/jira/browse/HADOOP-15262) | AliyunOSS: move files under a directory in parallel when rename a directory |  Major | fs/oss | wujinhu | wujinhu |
-| [HDFS-13215](https://issues.apache.org/jira/browse/HDFS-13215) | RBF: Move Router to its own module |  Major | . | Íñigo Goiri | Wei Yan |
-| [HDFS-13250](https://issues.apache.org/jira/browse/HDFS-13250) | RBF: Router to manage requests across multiple subclusters |  Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-13318](https://issues.apache.org/jira/browse/HDFS-13318) | RBF: Fix FindBugs in hadoop-hdfs-rbf |  Minor | . | Íñigo Goiri | Ekanth S |
-| [HDFS-12792](https://issues.apache.org/jira/browse/HDFS-12792) | RBF: Test Router-based federation using HDFSContract |  Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-12512](https://issues.apache.org/jira/browse/HDFS-12512) | RBF: Add WebHDFS |  Major | fs | Íñigo Goiri | Wei Yan |
-| [HDFS-13291](https://issues.apache.org/jira/browse/HDFS-13291) | RBF: Implement available space based OrderResolver |  Major | . | Yiqun Lin | Yiqun Lin |
-| [HDFS-13204](https://issues.apache.org/jira/browse/HDFS-13204) | RBF: Optimize name service safe mode icon |  Minor | . | liuhongtong | liuhongtong |
-| [HDFS-13352](https://issues.apache.org/jira/browse/HDFS-13352) | RBF: Add xsl stylesheet for hdfs-rbf-default.xml |  Major | documentation | Takanobu Asanuma | Takanobu Asanuma |
-| [YARN-8010](https://issues.apache.org/jira/browse/YARN-8010) | Add config in FederationRMFailoverProxy to not bypass facade cache when failing over |  Minor | . | Botong Huang | Botong Huang |
-| [HDFS-13347](https://issues.apache.org/jira/browse/HDFS-13347) | RBF: Cache datanode reports |  Minor | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-13289](https://issues.apache.org/jira/browse/HDFS-13289) | RBF: TestConnectionManager#testCleanup() test case need correction |  Minor | . | Dibyendu Karmakar | Dibyendu Karmakar |
-| [HDFS-13364](https://issues.apache.org/jira/browse/HDFS-13364) | RBF: Support NamenodeProtocol in the Router |  Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HADOOP-14651](https://issues.apache.org/jira/browse/HADOOP-14651) | Update okhttp version to 2.7.5 |  Major | fs/adl | Ray Chiang | Ray Chiang |
-| [HADOOP-14999](https://issues.apache.org/jira/browse/HADOOP-14999) | AliyunOSS: provide one asynchronous multi-part based uploading mechanism |  Major | fs/oss | Genmao Yu | Genmao Yu |
-
-
-### OTHER:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HADOOP-15149](https://issues.apache.org/jira/browse/HADOOP-15149) | CryptoOutputStream should implement StreamCapabilities |  Major | fs | Mike Drob | Xiao Chen |
-| [YARN-7691](https://issues.apache.org/jira/browse/YARN-7691) | Add Unit Tests for ContainersLauncher |  Major | . | Sampada Dehankar | Sampada Dehankar |
-| [HADOOP-15177](https://issues.apache.org/jira/browse/HADOOP-15177) | Update the release year to 2018 |  Blocker | build | Akira Ajisaka | Bharat Viswanadham |
-
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/187a00f8/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
deleted file mode 100644
index bed70b1..0000000
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
+++ /dev/null
@@ -1,88 +0,0 @@
-
-<!---
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
--->
-# "Apache Hadoop"  2.9.1 Release Notes
-
-These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
-
-
----
-
-* [HADOOP-12756](https://issues.apache.org/jira/browse/HADOOP-12756) | *Major* | **Incorporate Aliyun OSS file system implementation**
-
-Aliyun OSS is widely used among China's cloud users and this work implemented a new Hadoop compatible filesystem AliyunOSSFileSystem with oss scheme, similar to the s3a and azure support.
-
-
----
-
-* [HADOOP-14964](https://issues.apache.org/jira/browse/HADOOP-14964) | *Major* | **AliyunOSS: backport Aliyun OSS module to branch-2**
-
-Aliyun OSS is widely used among China's cloud users and this work implemented a new Hadoop compatible filesystem AliyunOSSFileSystem with oss:// scheme, similar to the s3a and azure support.
-
-
----
-
-* [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | *Major* | **RBF: Document Router and State Store metrics**
-
-This JIRA makes the following change:
-Change the Router metrics context from 'router' to 'dfs'.
-
-
----
-
-* [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | *Major* | **RBF: Add ACL support for mount table**
-
-Mount table entries now support ACLs. Users will no longer be able to modify entries they do not own; pre-existing entries (which had no permissions before) are assigned owner:superuser, group:supergroup, permission:755 as the default permissions. To modify such entries, log in as the superuser.
-
-
----
-
-* [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | *Major* | **Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath**
-
-Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath.
-
-
----
-
-* [HADOOP-15156](https://issues.apache.org/jira/browse/HADOOP-15156) | *Major* | **backport HADOOP-15086 rename fix to branch-2**
-
-[WASB] Fix Azure implementation of Filesystem.rename to ensure that at most one operation succeeds when there are multiple, concurrent rename operations targeting the same destination file.
-
-
----
-
-* [HADOOP-15027](https://issues.apache.org/jira/browse/HADOOP-15027) | *Major* | **AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance**
-
-Support multi-thread pre-read in AliyunOSSInputStream to improve the sequential read performance from Hadoop to Aliyun OSS.
-
-
----
-
-* [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | *Major* | **RBF: Fix doc error setting up client**
-
-Fix the documentation error in setting up HDFS Router-based Federation.
-
-
----
-
-* [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | *Minor* | **RBF: Use the ZooKeeper as the default State Store**
-
-Change the default State Store from a local file to ZooKeeper. This will require an additional ZooKeeper address to be configured.
-
-
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/187a00f8/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
deleted file mode 100644
index a5d87c7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
+++ /dev/null
@@ -1,312 +0,0 @@
-<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
-<!-- Generated by the JDiff Javadoc doclet -->
-<!-- (http://www.jdiff.org) -->
-<!-- on Mon Apr 16 12:03:07 UTC 2018 -->
-
-<api
-  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
-  xsi:noNamespaceSchemaLocation='api.xsd'
-  name="Apache Hadoop HDFS 2.9.1"
-  jdversion="1.0.9">
-
-<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.9.1.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.9.1.jar:/maven/org/slf4j/slf4j-api/1.7.25/slf4j-api-1.7.25.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/4.41.1/nimbus-jose-jwt-4.41.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/1.3.1/json-smart-1.3.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/jline/jline/0.9.94/jline-0.9.94.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.9.1.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/org/apache/commons/commons-lang3/3.4/commons-lang3-3.4.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.5/snappy-java-1.0.5.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/org/codehaus/woodstox/stax2-api/3.1.4/stax2-api-3.1.4.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.0.3/woodstox-core-5.0.3.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-2.9.1.jar:/maven/com/squareup/okhttp/okhttp/2.7.5/okhttp-2.7.5.jar:/maven/com/squareup/okio/okio/1.6.0/okio-1.6.0.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/slf4j/slf4j-log4j12/1.7.25/slf4j-log4j12-1.7.25.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/io/netty/netty-all/4.0.23.Final/netty-all-4.0.23.Final.jar:/maven/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/maven/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/maven/org/apache/htrace/htrace-core4/4.1.0-incubating/htrace-core4-4.1.0-incubating.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.7.8/jackson-databind-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.7.8/jackson-annotations-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.7.8/jackson-core-2.7.8.jar -sourcepath /build/source/hadoop-hdfs-project/hadoop-hdfs/src/main/java -apidir /build/source/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 2.9.1 -->
-<package name="org.apache.hadoop.hdfs">
-  <doc>
-  <![CDATA[<p>A distributed implementation of {@link
-org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
-Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
-
-<p>The most important difference is that unlike GFS, Hadoop DFS files 
-have strictly one writer at any one time.  Bytes are always appended 
-to the end of the writer's stream.  There is no notion of "record appends"
-or "mutations" that are then checked or reordered.  Writers simply emit 
-a byte stream.  That byte stream is guaranteed to be stored in the 
-order written.</p>]]>
-  </doc>
-</package>
-<package name="org.apache.hadoop.hdfs.net">
-</package>
-<package name="org.apache.hadoop.hdfs.protocol">
-</package>
-<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
-</package>
-<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
-</package>
-<package name="org.apache.hadoop.hdfs.protocolPB">
-</package>
-<package name="org.apache.hadoop.hdfs.qjournal.client">
-</package>
-<package name="org.apache.hadoop.hdfs.qjournal.protocol">
-</package>
-<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
-</package>
-<package name="org.apache.hadoop.hdfs.qjournal.server">
-  <!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
-  <interface name="JournalNodeMXBean"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getJournalsStatus" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Get status information (e.g., whether formatted) of JournalNode's journals.
- 
- @return A string presenting status for each journal]]>
-      </doc>
-    </method>
-    <doc>
-    <![CDATA[This is the JMX management interface for JournalNode information]]>
-    </doc>
-  </interface>
-  <!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
-</package>
-<package name="org.apache.hadoop.hdfs.security.token.block">
-</package>
-<package name="org.apache.hadoop.hdfs.security.token.delegation">
-</package>
-<package name="org.apache.hadoop.hdfs.server.balancer">
-</package>
-<package name="org.apache.hadoop.hdfs.server.blockmanagement">
-</package>
-<package name="org.apache.hadoop.hdfs.server.common">
-</package>
-<package name="org.apache.hadoop.hdfs.server.datanode">
-</package>
-<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
-</package>
-<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
-</package>
-<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
-</package>
-<package name="org.apache.hadoop.hdfs.server.datanode.web">
-</package>
-<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
-</package>
-<package name="org.apache.hadoop.hdfs.server.mover">
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode">
-  <!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
-  <interface name="AuditLogger"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="initialize"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <doc>
-      <![CDATA[Called during initialization of the logger.
-
- @param conf The configuration object.]]>
-      </doc>
-    </method>
-    <method name="logAuditEvent"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="succeeded" type="boolean"/>
-      <param name="userName" type="java.lang.String"/>
-      <param name="addr" type="java.net.InetAddress"/>
-      <param name="cmd" type="java.lang.String"/>
-      <param name="src" type="java.lang.String"/>
-      <param name="dst" type="java.lang.String"/>
-      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
-      <doc>
-      <![CDATA[Called to log an audit event.
- <p>
- This method must return as quickly as possible, since it's called
- in a critical section of the NameNode's operation.
-
- @param succeeded Whether authorization succeeded.
- @param userName Name of the user executing the request.
- @param addr Remote address of the request.
- @param cmd The requested command.
- @param src Path of affected source file.
- @param dst Path of affected destination file (if any).
- @param stat File information for operations that change the file's
-             metadata (permissions, owner, times, etc).]]>
-      </doc>
-    </method>
-    <doc>
-    <![CDATA[Interface defining an audit logger.]]>
-    </doc>
-  </interface>
-  <!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
-  <!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
-  <class name="HdfsAuditLogger" extends="java.lang.Object"
-    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
-    <constructor name="HdfsAuditLogger"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="logAuditEvent"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="succeeded" type="boolean"/>
-      <param name="userName" type="java.lang.String"/>
-      <param name="addr" type="java.net.InetAddress"/>
-      <param name="cmd" type="java.lang.String"/>
-      <param name="src" type="java.lang.String"/>
-      <param name="dst" type="java.lang.String"/>
-      <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
-    </method>
-    <method name="logAuditEvent"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="succeeded" type="boolean"/>
-      <param name="userName" type="java.lang.String"/>
-      <param name="addr" type="java.net.InetAddress"/>
-      <param name="cmd" type="java.lang.String"/>
-      <param name="src" type="java.lang.String"/>
-      <param name="dst" type="java.lang.String"/>
-      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
-      <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/>
-      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
-      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
-      <doc>
-      <![CDATA[Same as
- {@link #logAuditEvent(boolean, String, InetAddress, String, String, String,
- FileStatus)} with additional parameters related to logging delegation token
- tracking IDs.
- 
- @param succeeded Whether authorization succeeded.
- @param userName Name of the user executing the request.
- @param addr Remote address of the request.
- @param cmd The requested command.
- @param src Path of affected source file.
- @param dst Path of affected destination file (if any).
- @param stat File information for operations that change the file's metadata
-          (permissions, owner, times, etc).
- @param callerContext Context information of the caller
- @param ugi UserGroupInformation of the current user, or null if not logging
-          token tracking information
- @param dtSecretManager The token secret manager, or null if not logging
-          token tracking information]]>
-      </doc>
-    </method>
-    <method name="logAuditEvent"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="succeeded" type="boolean"/>
-      <param name="userName" type="java.lang.String"/>
-      <param name="addr" type="java.net.InetAddress"/>
-      <param name="cmd" type="java.lang.String"/>
-      <param name="src" type="java.lang.String"/>
-      <param name="dst" type="java.lang.String"/>
-      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
-      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
-      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
-      <doc>
-      <![CDATA[Same as
- {@link #logAuditEvent(boolean, String, InetAddress, String, String,
- String, FileStatus, CallerContext, UserGroupInformation,
- DelegationTokenSecretManager)} without {@link CallerContext} information.]]>
-      </doc>
-    </method>
-    <doc>
-    <![CDATA[Extension of {@link AuditLogger}.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
-  <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
-  <class name="INodeAttributeProvider" extends="java.lang.Object"
-    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="INodeAttributeProvider"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="start"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Initialize the provider. This method is called at NameNode startup
- time.]]>
-      </doc>
-    </method>
-    <method name="stop"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Shutdown the provider. This method is called at NameNode shutdown time.]]>
-      </doc>
-    </method>
-    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="fullPath" type="java.lang.String"/>
-      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
-    </method>
-    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="pathElements" type="java.lang.String[]"/>
-      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
-    </method>
-    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="components" type="byte[][]"/>
-      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
-    </method>
-    <method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
-      <doc>
-      <![CDATA[Can be over-ridden by implementations to provide a custom Access Control
- Enforcer that can provide an alternate implementation of the
- default permission checking logic.
- @param defaultEnforcer The Default AccessControlEnforcer
- @return The AccessControlEnforcer to use]]>
-      </doc>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode.ha">
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode.top">
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
-</package>
-<package name="org.apache.hadoop.hdfs.server.protocol">
-</package>
-<package name="org.apache.hadoop.hdfs.tools">
-</package>
-<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
-</package>
-<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
-</package>
-<package name="org.apache.hadoop.hdfs.tools.snapshot">
-</package>
-<package name="org.apache.hadoop.hdfs.util">
-</package>
-<package name="org.apache.hadoop.hdfs.web">
-</package>
-<package name="org.apache.hadoop.hdfs.web.resources">
-</package>
-
-</api>
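
The removed file above is generated jdiff API metadata, so nothing functional changes here, but the INodeAttributeProvider extension point it documents remains part of HDFS. A minimal sketch of a provider implementing the three abstract methods listed above (the class name and pass-through logic are illustrative only, not a real provider):

    import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;
    import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;

    public class PassThroughAttributeProvider extends INodeAttributeProvider {
      @Override
      public void start() {
        // Called once at NameNode startup; acquire external resources here.
      }

      @Override
      public void stop() {
        // Called at NameNode shutdown; release anything acquired in start().
      }

      @Override
      public INodeAttributes getAttributes(String[] pathElements,
          INodeAttributes inode) {
        // Return the stored attributes unchanged; a real provider could
        // substitute owner, group, permissions, or ACL details here.
        return inode;
      }
    }

The non-abstract getAttributes overloads and getExternalAccessControlEnforcer keep their default behavior unless overridden.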

http://git-wip-us.apache.org/repos/asf/hadoop/blob/187a00f8/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 5f83da3..cfaa698 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -145,7 +145,7 @@
         <activeByDefault>false</activeByDefault>
       </activation>
       <properties>
-        <jdiff.stable.api>2.9.1</jdiff.stable.api>
+        <jdiff.stable.api>3.0.2</jdiff.stable.api>
         <jdiff.stability>-unstable</jdiff.stability>
         <!-- Commented out for HADOOP-11776 -->
         <!-- Uncomment param name="${jdiff.compatibility}" in javadoc doclet if compatibility is not empty -->




[32/50] [abbrv] hadoop git commit: YARN-8271. [UI2] Improve labeling of certain tables. Contributed by Yesha Vora.

Posted by xy...@apache.org.
YARN-8271. [UI2] Improve labeling of certain tables. Contributed by Yesha Vora.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82e41a35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82e41a35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82e41a35

Branch: refs/heads/HDDS-4
Commit: 82e41a356e8dc62bccbeea8e698d7818fb493db4
Parents: 7192749
Author: Sunil G <su...@apache.org>
Authored: Mon May 14 20:59:31 2018 +0530
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools.js | 2 +-
 .../hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js      | 4 ++--
 .../src/main/webapp/app/templates/cluster-overview.hbs           | 4 ++--
 .../src/main/webapp/app/templates/components/node-menu-panel.hbs | 4 ++--
 .../app/templates/components/yarn-queue/capacity-queue-info.hbs  | 2 +-
 .../app/templates/components/yarn-queue/capacity-queue.hbs       | 2 +-
 .../app/templates/components/yarn-queue/fair-queue-info.hbs      | 2 +-
 .../webapp/app/templates/components/yarn-queue/fair-queue.hbs    | 2 +-
 .../hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools.hbs  | 4 ++--
 9 files changed, 13 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e41a35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools.js
index b36098b..cb0c8d3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools.js
@@ -23,7 +23,7 @@ export default Ember.Controller.extend({
     text: "Home",
     routeName: 'application'
   }, {
-    text: "Yarn Tools",
+    text: "YARN Tools",
     routeName: 'yarn-tools',
   }],
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e41a35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js
index e1eba5a..ace50e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js
@@ -56,11 +56,11 @@ export default Ember.Helper.helper(function(params,hash) {
     html = html + ' class="active"';
   }
   html = html + '><a href="#/yarn-node-apps/' + hash.nodeId + '/' + hash.nodeAddr +
-      '">List of Applications</a></li><li';
+      '">List of Applications on this Node</a></li><li';
   if (hash.path === 'yarn-node-containers') {
     html = html + ' class="active"';
   }
   html = html + '><a href="#/yarn-node-containers/' +hash.nodeId + '/' + hash.nodeAddr +
-      '">List of Containers</a></li></ul></ul></div>';
+      '">List of Containers on this Node</a></li></ul></ul></div>';
   return Ember.String.htmlSafe(html);
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e41a35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/cluster-overview.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/cluster-overview.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/cluster-overview.hbs
index ff4682a..e7752ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/cluster-overview.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/cluster-overview.hbs
@@ -58,7 +58,7 @@
     <div class="col-lg-4 container-fluid">
       <div class="panel panel-default">
         <div class="panel-heading">
-          Finished Apps
+          Finished Apps From All Users
         </div>
         <div class="container-fluid" id="finishedapps-donut-chart">
           {{donut-chart data=model.clusterMetrics.firstObject.getFinishedAppsDataForDonutChart
@@ -74,7 +74,7 @@
     <div class="col-lg-4 container-fluid">
       <div class="panel panel-default">
         <div class="panel-heading">
-          Running Apps
+          Running Apps From All Users
         </div>
         <div class="container-fluid" id="runningapps-donut-chart">
           {{donut-chart data=model.clusterMetrics.firstObject.getRunningAppsDataForDonutChart

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e41a35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/node-menu-panel.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/node-menu-panel.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/node-menu-panel.hbs
index acdff2f..ba48952 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/node-menu-panel.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/node-menu-panel.hbs
@@ -29,11 +29,11 @@
               {{/link-to}}
             {{/link-to}}
             {{#link-to 'yarn-node-apps' tagName="li"}}
-              {{#link-to 'yarn-node-apps' nodeId encodedAddr}}List of Applications
+              {{#link-to 'yarn-node-apps' nodeId encodedAddr}}List of Applications on this Node
               {{/link-to}}
             {{/link-to}}
             {{#link-to 'yarn-node-containers' tagName="li"}}
-              {{#link-to 'yarn-node-containers' nodeId encodedAddr}}List of Containers
+              {{#link-to 'yarn-node-containers' nodeId encodedAddr}}List of Containers on this Node
               {{/link-to}}
             {{/link-to}}
             {{#if (and nmGpuInfo nmGpuInfo.info.totalGpuDevices)}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e41a35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs
index a7260bc..11fed76 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs
@@ -20,7 +20,7 @@
   <div class="col-lg-6">
     <div class="panel panel-default">
       <div class="panel-heading">
-        Running Apps: {{model.selected}}
+        Running Apps From All Users: {{model.selected}}
       </div>
       <div id="numapplications-donut-chart">
         {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e41a35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
index 9ad2a6f..6615b1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
@@ -35,7 +35,7 @@
     {{yarn-queue-partition-capacity-labels partitionMap=model.selectedQueue.partitionMap queue=model.selectedQueue filteredPartition=filteredPartition}}
   </div>
 
-  <h5> Running Apps </h5>
+  <h5> Running Apps From All Users in Queue </h5>
   <div id="numapplications-donut-chart">
     {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData
     showLabels=true

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e41a35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs
index a770bfe..3e368ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs
@@ -51,7 +51,7 @@
     <div class="col-lg-6 container-fluid">
       <div class="panel panel-default">
         <div class="panel-heading">
-          Running Apps: {{model.selected}}
+          Running Apps From All Users: {{model.selected}}
         </div>
         <div class="container-fluid" id="numapplications-donut-chart">
           {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e41a35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
index dcc80c1..85670da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
@@ -50,7 +50,7 @@
     <div class="container-fluid">
       <div class="panel panel-default">
         <div class="panel-heading">
-          Running Apps: {{model.selected}}
+          Running Apps From All Users: {{model.selected}}
         </div>
         <div class="container-fluid" id="numapplications-donut-chart">
           {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e41a35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools.hbs
index 2f618fd..3efcf57 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools.hbs
@@ -67,7 +67,7 @@
           <div class="col-lg-4 container-fluid">
             <div class="panel panel-default">
               <div class="panel-heading">
-                Finished Apps
+                Finished Apps From All Users
               </div>
               <div class="container-fluid" id="finishedapps-donut-chart">
                 {{donut-chart data=model.clusterMetrics.firstObject.getFinishedAppsDataForDonutChart
@@ -84,7 +84,7 @@
           <div class="col-lg-4 container-fluid">
             <div class="panel panel-default">
               <div class="panel-heading">
-                Running Apps
+                Running Apps From All Users
               </div>
               <div class="container-fluid" id="runningapps-donut-chart">
                 {{donut-chart data=model.clusterMetrics.firstObject.getRunningAppsDataForDonutChart




[31/50] [abbrv] hadoop git commit: HDDS-32. Fix TestContainerDeletionChoosingPolicy#testTopNOrderedChoosingPolicy. Contributed by Mukul Kumar Singh.

Posted by xy...@apache.org.
HDDS-32. Fix TestContainerDeletionChoosingPolicy#testTopNOrderedChoosingPolicy.
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7befa846
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7befa846
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7befa846

Branch: refs/heads/HDDS-4
Commit: 7befa84654fe3f29b42f96ad37cf6c04358e25ad
Parents: 7d7decb
Author: Anu Engineer <ae...@apache.org>
Authored: Sat May 12 10:18:53 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../common/impl/TestContainerDeletionChoosingPolicy.java | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7befa846/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
index 331db40..4344419 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
@@ -28,6 +28,7 @@ import java.util.Map;
 import java.util.Random;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -138,14 +139,14 @@ public class TestContainerDeletionChoosingPolicy {
 
     int numContainers = 10;
     Random random = new Random();
-    Map<String, Integer> name2Count = new HashMap<>();
+    Map<Long, Integer> name2Count = new HashMap<>();
     // create [numContainers + 1] containers
     for (int i = 0; i <= numContainers; i++) {
-      String containerName = OzoneUtils.getRequestID();
-      ContainerData data = new ContainerData(new Long(i), conf);
+      long containerId = RandomUtils.nextLong();
+      ContainerData data = new ContainerData(containerId, conf);
       containerManager.createContainer(data);
       Assert.assertTrue(
-          containerManager.getContainerMap().containsKey(containerName));
+          containerManager.getContainerMap().containsKey(containerId));
 
       // don't create deletion blocks in the last container.
       if (i == numContainers) {
@@ -155,7 +156,7 @@ public class TestContainerDeletionChoosingPolicy {
       // create random number of deletion blocks and write to container db
       int deletionBlocks = random.nextInt(numContainers) + 1;
       // record <ContainerName, DeletionCount> value
-      name2Count.put(containerName, deletionBlocks);
+      name2Count.put(containerId, deletionBlocks);
       for (int j = 0; j <= deletionBlocks; j++) {
         MetadataStore metadata = KeyUtils.getDB(data, conf);
         String blk = "blk" + i + "-" + j;
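
The key change above is that the bookkeeping map is now keyed by the numeric container ID instead of a request-ID string. A standalone sketch of the same pattern, runnable on its own (class and variable names are illustrative, not the test's actual helpers):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Random;
    import org.apache.commons.lang3.RandomUtils;

    public class ContainerIdBookkeeping {
      public static void main(String[] args) {
        int numContainers = 10;
        Random random = new Random();
        // Keyed by long container ID, mirroring the Map<Long, Integer> above.
        Map<Long, Integer> idToDeletionCount = new HashMap<>();
        for (int i = 0; i < numContainers; i++) {
          long containerId = RandomUtils.nextLong();
          int deletionBlocks = random.nextInt(numContainers) + 1;
          idToDeletionCount.put(containerId, deletionBlocks);
        }
        idToDeletionCount.forEach((id, count) ->
            System.out.println("container " + id + " -> " + count + " deletion blocks"));
      }
    }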




[04/50] [abbrv] hadoop git commit: HADOOP-15354. hadoop-aliyun & hadoop-azure modules to mark hadoop-common as provided

Posted by xy...@apache.org.
HADOOP-15354. hadoop-aliyun & hadoop-azure modules to mark hadoop-common as provided

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c1c344b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c1c344b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c1c344b

Branch: refs/heads/HDDS-4
Commit: 5c1c344b3e510a723f90f5a8ae65b0c2dba17672
Parents: 4db209b
Author: Steve Loughran <st...@hortonworks.com>
Authored: Thu May 10 18:38:08 2018 +0900
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:08 2018 -0700

----------------------------------------------------------------------
 hadoop-tools/hadoop-aliyun/pom.xml | 2 +-
 hadoop-tools/hadoop-azure/pom.xml  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c1c344b/hadoop-tools/hadoop-aliyun/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/pom.xml b/hadoop-tools/hadoop-aliyun/pom.xml
index cd8cc5d..ed60783 100644
--- a/hadoop-tools/hadoop-aliyun/pom.xml
+++ b/hadoop-tools/hadoop-aliyun/pom.xml
@@ -111,7 +111,7 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
-      <scope>compile</scope>
+      <scope>provided</scope>
     </dependency>
 
     <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c1c344b/hadoop-tools/hadoop-azure/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index 2f19311..44b67a0 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -143,7 +143,7 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
-      <scope>compile</scope>
+      <scope>provided</scope>
     </dependency>
 
     <dependency>




[15/50] [abbrv] hadoop git commit: HDDS-42. Inconsistent module names and descriptions. Contributed by Tsz Wo Nicholas Sze.

Posted by xy...@apache.org.
HDDS-42. Inconsistent module names and descriptions. Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/757bc8a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/757bc8a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/757bc8a6

Branch: refs/heads/HDDS-4
Commit: 757bc8a67ee54b251b7d83edc39988d78d894103
Parents: 7ecfc2a
Author: Anu Engineer <ae...@apache.org>
Authored: Thu May 10 11:44:14 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:08 2018 -0700

----------------------------------------------------------------------
 hadoop-hdds/client/pom.xml            | 5 +++--
 hadoop-hdds/common/pom.xml            | 2 +-
 hadoop-hdds/container-service/pom.xml | 4 ++--
 hadoop-hdds/framework/pom.xml         | 4 ++--
 hadoop-hdds/pom.xml                   | 4 ++--
 hadoop-hdds/server-scm/pom.xml        | 4 ++--
 hadoop-hdds/tools/pom.xml             | 4 ++--
 hadoop-ozone/acceptance-test/pom.xml  | 4 ++--
 hadoop-ozone/common/pom.xml           | 2 +-
 hadoop-ozone/integration-test/pom.xml | 4 ++--
 hadoop-ozone/ozone-manager/pom.xml    | 4 ++--
 hadoop-ozone/pom.xml                  | 2 +-
 12 files changed, 22 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/757bc8a6/hadoop-hdds/client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml
index d2efec4..d6db9c6 100644
--- a/hadoop-hdds/client/pom.xml
+++ b/hadoop-hdds/client/pom.xml
@@ -22,10 +22,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <artifactId>hadoop-hdds</artifactId>
     <version>0.2.1-SNAPSHOT</version>
   </parent>
+
   <artifactId>hadoop-hdds-client</artifactId>
   <version>0.2.1-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Client libraries</description>
-  <name>Apache HDDS Client</name>
+  <description>Apache Hadoop Distributed Data Store Client Library</description>
+  <name>Apache Hadoop HDDS Client</name>
   <packaging>jar</packaging>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/757bc8a6/hadoop-hdds/common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index b81da96..bf53042 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -25,7 +25,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <artifactId>hadoop-hdds-common</artifactId>
   <version>0.2.1-SNAPSHOT</version>
   <description>Apache Hadoop Distributed Data Store Common</description>
-  <name>Apache HDDS Common</name>
+  <name>Apache Hadoop HDDS Common</name>
   <packaging>jar</packaging>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/757bc8a6/hadoop-hdds/container-service/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml
index 36c7235..542462e 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -24,8 +24,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   </parent>
   <artifactId>hadoop-hdds-container-service</artifactId>
   <version>0.2.1-SNAPSHOT</version>
-  <description>Apache HDDS Container server</description>
-  <name>Apache HDDS Container server</name>
+  <description>Apache Hadoop Distributed Data Store Container Service</description>
+  <name>Apache Hadoop HDDS Container Service</name>
   <packaging>jar</packaging>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/757bc8a6/hadoop-hdds/framework/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml
index c8d0797..a497133 100644
--- a/hadoop-hdds/framework/pom.xml
+++ b/hadoop-hdds/framework/pom.xml
@@ -24,8 +24,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   </parent>
   <artifactId>hadoop-hdds-server-framework</artifactId>
   <version>0.2.1-SNAPSHOT</version>
-  <description>Apache HDDS server framework</description>
-  <name>Apache HDDS Server Common</name>
+  <description>Apache Hadoop Distributed Data Store Server Framework</description>
+  <name>Apache Hadoop HDDS Server Framework</name>
   <packaging>jar</packaging>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/757bc8a6/hadoop-hdds/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index c15c541..fab45e2 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -26,8 +26,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   <artifactId>hadoop-hdds</artifactId>
   <version>0.2.1-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Parent project</description>
-  <name>Apache Hdds</name>
+  <description>Apache Hadoop Distributed Data Store Project</description>
+  <name>Apache Hadoop HDDS</name>
   <packaging>pom</packaging>
 
   <modules>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/757bc8a6/hadoop-hdds/server-scm/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
index fd927d8..1330be8 100644
--- a/hadoop-hdds/server-scm/pom.xml
+++ b/hadoop-hdds/server-scm/pom.xml
@@ -24,8 +24,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   </parent>
   <artifactId>hadoop-hdds-server-scm</artifactId>
   <version>0.2.1-SNAPSHOT</version>
-  <description>Apache HDDS SCM server</description>
-  <name>Apache Hadoop HDDS SCM server</name>
+  <description>Apache Hadoop Distributed Data Store Storage Container Manager Server</description>
+  <name>Apache Hadoop HDDS SCM Server</name>
   <packaging>jar</packaging>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/757bc8a6/hadoop-hdds/tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml
index acc6711..9017d3e 100644
--- a/hadoop-hdds/tools/pom.xml
+++ b/hadoop-hdds/tools/pom.xml
@@ -25,8 +25,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   <artifactId>hadoop-hdds-tools</artifactId>
   <version>0.2.1-SNAPSHOT</version>
-  <description>Apache HDDS Tools</description>
-  <name>Apache Hadoop HDDS tools</name>
+  <description>Apache Hadoop Distributed Data Store Tools</description>
+  <name>Apache Hadoop HDDS Tools</name>
   <packaging>jar</packaging>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/757bc8a6/hadoop-ozone/acceptance-test/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/pom.xml b/hadoop-ozone/acceptance-test/pom.xml
index b4b24d5..fb6794c 100644
--- a/hadoop-ozone/acceptance-test/pom.xml
+++ b/hadoop-ozone/acceptance-test/pom.xml
@@ -25,8 +25,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   </parent>
   <artifactId>hadoop-ozone-acceptance-test</artifactId>
   <version>3.2.0-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone Acceptance test</description>
-  <name>Apache Hadoop Ozone acceptance test</name>
+  <description>Apache Hadoop Ozone Acceptance Tests</description>
+  <name>Apache Hadoop Ozone Acceptance Tests</name>
   <packaging>pom</packaging>
   <build>
     <plugins>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/757bc8a6/hadoop-ozone/common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml
index 6ddba0f..d8581d1 100644
--- a/hadoop-ozone/common/pom.xml
+++ b/hadoop-ozone/common/pom.xml
@@ -24,7 +24,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   </parent>
   <artifactId>hadoop-ozone-common</artifactId>
   <version>0.2.1-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone Common libraris</description>
+  <description>Apache Hadoop Ozone Common</description>
   <name>Apache Hadoop Ozone Common</name>
   <packaging>jar</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/757bc8a6/hadoop-ozone/integration-test/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml
index 7e2483d..4aa1aa5 100644
--- a/hadoop-ozone/integration-test/pom.xml
+++ b/hadoop-ozone/integration-test/pom.xml
@@ -24,8 +24,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   </parent>
   <artifactId>hadoop-ozone-integration-test</artifactId>
   <version>0.2.1-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone Integration test package</description>
-  <name>Apache Hadoop Ozone integration tests</name>
+  <description>Apache Hadoop Ozone Integration Tests</description>
+  <name>Apache Hadoop Ozone Integration Tests</name>
   <packaging>jar</packaging>
 
   <dependencies>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/757bc8a6/hadoop-ozone/ozone-manager/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml
index 7f61129..50e7b45 100644
--- a/hadoop-ozone/ozone-manager/pom.xml
+++ b/hadoop-ozone/ozone-manager/pom.xml
@@ -24,8 +24,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   </parent>
   <artifactId>hadoop-ozone-ozone-manager</artifactId>
   <version>0.2.1-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone OM server</description>
-  <name>Apache Hadoop Ozone OM server</name>
+  <description>Apache Hadoop Ozone Manager Server</description>
+  <name>Apache Hadoop Ozone Manager Server</name>
   <packaging>jar</packaging>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/757bc8a6/hadoop-ozone/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 5667444..6687382 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -25,7 +25,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   </parent>
   <artifactId>hadoop-ozone</artifactId>
   <version>0.2.1-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone parent project</description>
+  <description>Apache Hadoop Ozone Project</description>
   <name>Apache Hadoop Ozone</name>
   <packaging>pom</packaging>
 




[02/50] [abbrv] hadoop git commit: HDDS-37. Remove dependency of hadoop-hdds-common and hadoop-hdds-server-scm from hadoop-ozone/tools/pom.xml. Contributed by Sandeep Nemuri.

Posted by xy...@apache.org.
HDDS-37. Remove dependency of hadoop-hdds-common and hadoop-hdds-server-scm from hadoop-ozone/tools/pom.xml.
Contributed by Sandeep Nemuri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ac2da8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ac2da8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ac2da8b

Branch: refs/heads/HDDS-4
Commit: 9ac2da8b3ac6e6e1e5f01381b9930856a5b697b5
Parents: 78d9241
Author: Anu Engineer <ae...@apache.org>
Authored: Thu May 10 16:27:21 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:08 2018 -0700

----------------------------------------------------------------------
 hadoop-ozone/tools/pom.xml | 12 ------------
 1 file changed, 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ac2da8b/hadoop-ozone/tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index e586f1b..a78565a 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -49,18 +49,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>metrics-core</artifactId>
       <version>3.2.4</version>
     </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-scm</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
     <dependency>
       <groupId>org.openjdk.jmh</groupId>
       <artifactId>jmh-core</artifactId>




[43/50] [abbrv] hadoop git commit: YARN-7654. Support ENTRY_POINT for docker container. Contributed by Eric Yang

Posted by xy...@apache.org.
YARN-7654. Support ENTRY_POINT for docker container. Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1ba9682
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1ba9682
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1ba9682

Branch: refs/heads/HDDS-4
Commit: d1ba968278f140c08caf9ee535d42188c5555875
Parents: 51f44b8
Author: Jason Lowe <jl...@apache.org>
Authored: Fri May 11 18:56:05 2018 -0500
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/api/ApplicationConstants.java   |   9 +-
 .../provider/AbstractProviderService.java       |  91 ++++++++++-----
 .../provider/docker/DockerProviderService.java  |  42 +++++++
 .../launcher/ContainerLaunch.java               |  14 +++
 .../runtime/DockerLinuxContainerRuntime.java    |  20 ++--
 .../linux/runtime/docker/DockerClient.java      | 115 ++++++++++--------
 .../linux/runtime/docker/DockerRunCommand.java  |  44 +++++++
 .../impl/container-executor.c                   | 116 ++++++++++++++++++-
 .../container-executor/impl/utils/docker-util.c |  46 ++++++++
 .../container-executor/impl/utils/docker-util.h |  14 +++
 .../test/utils/test_docker_util.cc              |  42 +++++++
 .../src/site/markdown/DockerContainers.md       |   1 +
 12 files changed, 467 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ba9682/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index 38ad596..b63fe61 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -244,7 +244,14 @@ public interface ApplicationConstants {
      * Comma separate list of directories that the container should use for
      * logging.
      */
-    LOG_DIRS("LOG_DIRS");
+    LOG_DIRS("LOG_DIRS"),
+
+    /**
+     * $YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE
+     * Final, Docker run support ENTRY_POINT.
+     */
+    YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE(
+        "YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE");
 
     private final String variable;
     private Environment(String variable) {
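
Later hunks in this patch read the new constant back out of the container environment with Boolean.parseBoolean, so an absent or non-"true" value leaves the existing command-override behavior in place. A minimal sketch of that check (the environment map below is a stand-in for the container's real environment):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;

    public class EntryPointCheck {
      public static void main(String[] args) {
        Map<String, String> environment = new HashMap<>();
        environment.put(
            Environment.YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE.name(),
            "true");
        // Same parse the runtime uses: only the literal "true" enables it.
        boolean useEntryPoint = Boolean.parseBoolean(environment.get(
            Environment.YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE.name()));
        System.out.println("ENTRY_POINT mode: " + useEntryPoint);
      }
    }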

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ba9682/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
index 5a17817..6d213c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
@@ -58,23 +58,26 @@ public abstract class AbstractProviderService implements ProviderService,
       Service service)
       throws IOException;
 
-  public void buildContainerLaunchContext(AbstractLauncher launcher,
+  public Map<String, String> buildContainerTokens(ComponentInstance instance,
+      Container container,
+      ContainerLaunchService.ComponentLaunchContext compLaunchContext) {
+      // Generate tokens (key-value pair) for config substitution.
+      // Get pre-defined tokens
+      Map<String, String> globalTokens =
+          instance.getComponent().getScheduler().globalTokens;
+      Map<String, String> tokensForSubstitution = ProviderUtils
+          .initCompTokensForSubstitute(instance, container,
+              compLaunchContext);
+      tokensForSubstitution.putAll(globalTokens);
+      return tokensForSubstitution;
+  }
+
+  public void buildContainerEnvironment(AbstractLauncher launcher,
       Service service, ComponentInstance instance,
       SliderFileSystem fileSystem, Configuration yarnConf, Container container,
-      ContainerLaunchService.ComponentLaunchContext compLaunchContext)
-      throws IOException, SliderException {
-    processArtifact(launcher, instance, fileSystem, service);
-
-    ServiceContext context =
-        instance.getComponent().getScheduler().getContext();
-    // Generate tokens (key-value pair) for config substitution.
-    // Get pre-defined tokens
-    Map<String, String> globalTokens =
-        instance.getComponent().getScheduler().globalTokens;
-    Map<String, String> tokensForSubstitution = ProviderUtils
-        .initCompTokensForSubstitute(instance, container,
-            compLaunchContext);
-    tokensForSubstitution.putAll(globalTokens);
+      ContainerLaunchService.ComponentLaunchContext compLaunchContext,
+      Map<String, String> tokensForSubstitution)
+          throws IOException, SliderException {
     // Set the environment variables in launcher
     launcher.putEnv(ServiceUtils.buildEnvMap(
         compLaunchContext.getConfiguration(), tokensForSubstitution));
@@ -90,17 +93,14 @@ public abstract class AbstractProviderService implements ProviderService,
     for (Entry<String, String> entry : launcher.getEnv().entrySet()) {
       tokensForSubstitution.put($(entry.getKey()), entry.getValue());
     }
-    //TODO add component host tokens?
-//    ProviderUtils.addComponentHostTokens(tokensForSubstitution, amState);
-
-    // create config file on hdfs and add local resource
-    ProviderUtils.createConfigFileAndAddLocalResource(launcher, fileSystem,
-        compLaunchContext, tokensForSubstitution, instance, context);
-
-    // handles static files (like normal file / archive file) for localization.
-    ProviderUtils.handleStaticFilesForLocalization(launcher, fileSystem,
-        compLaunchContext);
+  }
 
+  public void buildContainerLaunchCommand(AbstractLauncher launcher,
+      Service service, ComponentInstance instance,
+      SliderFileSystem fileSystem, Configuration yarnConf, Container container,
+      ContainerLaunchService.ComponentLaunchContext compLaunchContext,
+      Map<String, String> tokensForSubstitution)
+          throws IOException, SliderException {
     // substitute launch command
     String launchCommand = compLaunchContext.getLaunchCommand();
     // docker container may have empty commands
@@ -112,10 +112,15 @@ public abstract class AbstractProviderService implements ProviderService,
       operation.addOutAndErrFiles(OUT_FILE, ERR_FILE);
       launcher.addCommand(operation.build());
     }
+  }
 
+  public void buildContainerRetry(AbstractLauncher launcher,
+      Configuration yarnConf,
+      ContainerLaunchService.ComponentLaunchContext compLaunchContext) {
     // By default retry forever every 30 seconds
     launcher.setRetryContext(
-        YarnServiceConf.getInt(CONTAINER_RETRY_MAX, DEFAULT_CONTAINER_RETRY_MAX,
+        YarnServiceConf.getInt(CONTAINER_RETRY_MAX,
+            DEFAULT_CONTAINER_RETRY_MAX,
             compLaunchContext.getConfiguration(), yarnConf),
         YarnServiceConf.getInt(CONTAINER_RETRY_INTERVAL,
             DEFAULT_CONTAINER_RETRY_INTERVAL,
@@ -124,4 +129,38 @@ public abstract class AbstractProviderService implements ProviderService,
             DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL,
             compLaunchContext.getConfiguration(), yarnConf));
   }
+
+  public void buildContainerLaunchContext(AbstractLauncher launcher,
+      Service service, ComponentInstance instance,
+      SliderFileSystem fileSystem, Configuration yarnConf, Container container,
+      ContainerLaunchService.ComponentLaunchContext compLaunchContext)
+      throws IOException, SliderException {
+    processArtifact(launcher, instance, fileSystem, service);
+
+    ServiceContext context =
+        instance.getComponent().getScheduler().getContext();
+    // Generate tokens (key-value pair) for config substitution.
+    Map<String, String> tokensForSubstitution =
+        buildContainerTokens(instance, container, compLaunchContext);
+
+    // Setup launch context environment
+    buildContainerEnvironment(launcher, service, instance,
+        fileSystem, yarnConf, container, compLaunchContext,
+        tokensForSubstitution);
+
+    // create config file on hdfs and add local resource
+    ProviderUtils.createConfigFileAndAddLocalResource(launcher, fileSystem,
+        compLaunchContext, tokensForSubstitution, instance, context);
+
+    // handles static files (like normal file / archive file) for localization.
+    ProviderUtils.handleStaticFilesForLocalization(launcher, fileSystem,
+        compLaunchContext);
+
+    // replace launch command with token specific information
+    buildContainerLaunchCommand(launcher, service, instance, fileSystem,
+        yarnConf, container, compLaunchContext, tokensForSubstitution);
+
+    // Setup container retry settings
+    buildContainerRetry(launcher, yarnConf, compLaunchContext);
+  }
 }
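
The net effect of this hunk is a template method: buildContainerLaunchContext now drives four separately overridable steps (tokens, environment, launch command, retry), which is what lets the Docker provider in the next hunk replace only the command-construction step. A stripped-down illustration of that shape, using hypothetical stand-in types rather than the real service framework classes:

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical stand-ins to show the template-method shape only.
    abstract class LaunchBuilder {
      final void build() {
        Map<String, String> tokens = buildTokens();   // buildContainerTokens
        buildEnvironment(tokens);                     // buildContainerEnvironment
        buildLaunchCommand(tokens);                   // buildContainerLaunchCommand
        buildRetry();                                 // buildContainerRetry
      }
      Map<String, String> buildTokens() { return new HashMap<>(); }
      void buildEnvironment(Map<String, String> tokens) { }
      void buildLaunchCommand(Map<String, String> tokens) { }
      void buildRetry() { }
    }

    class DockerLaunchBuilder extends LaunchBuilder {
      @Override
      void buildLaunchCommand(Map<String, String> tokens) {
        // An ENTRY_POINT-aware provider overrides just this step, as
        // DockerProviderService does below.
      }
    }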

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ba9682/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
index c3e2619..821682d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
@@ -17,13 +17,23 @@
  */
 package org.apache.hadoop.yarn.service.provider.docker;
 
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.provider.AbstractProviderService;
+import org.apache.hadoop.yarn.service.provider.ProviderUtils;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
+import org.apache.hadoop.yarn.service.containerlaunch.CommandLineBuilder;
+import org.apache.hadoop.yarn.service.containerlaunch.ContainerLaunchService;
+import org.apache.hadoop.yarn.service.exceptions.SliderException;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
 
 import java.io.IOException;
+import java.util.Map;
 
 public class DockerProviderService extends AbstractProviderService
     implements DockerKeys {
@@ -39,4 +49,36 @@ public class DockerProviderService extends AbstractProviderService
     launcher.setRunPrivilegedContainer(
         compInstance.getCompSpec().getRunPrivilegedContainer());
   }
+
+  @Override
+  public void buildContainerLaunchCommand(AbstractLauncher launcher,
+      Service service, ComponentInstance instance,
+      SliderFileSystem fileSystem, Configuration yarnConf, Container container,
+      ContainerLaunchService.ComponentLaunchContext compLaunchContext,
+      Map<String, String> tokensForSubstitution)
+          throws IOException, SliderException {
+    Component component = instance.getComponent().getComponentSpec();
+    boolean useEntryPoint = Boolean.parseBoolean(component
+        .getConfiguration().getEnv(Environment
+          .YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE.name()));
+    if (useEntryPoint) {
+      String launchCommand = component.getLaunchCommand();
+      if (!StringUtils.isEmpty(launchCommand)) {
+        launcher.addCommand(launchCommand);
+      }
+    } else {
+      // substitute launch command
+      String launchCommand = compLaunchContext.getLaunchCommand();
+      // docker container may have empty commands
+      if (!StringUtils.isEmpty(launchCommand)) {
+        launchCommand = ProviderUtils
+            .substituteStrWithTokens(launchCommand, tokensForSubstitution);
+        CommandLineBuilder operation = new CommandLineBuilder();
+        operation.add(launchCommand);
+        operation.addOutAndErrFiles(OUT_FILE, ERR_FILE);
+        launcher.addCommand(operation.build());
+      }
+    }
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ba9682/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index fa77899..d43c069 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -1677,6 +1677,20 @@ public class ContainerLaunch implements Callable<Integer> {
       containerLogDirs, Map<Path, List<String>> resources,
       Path nmPrivateClasspathJarDir,
       Set<String> nmVars) throws IOException {
+    // Based on discussion in YARN-7654, for ENTRY_POINT enabled
+    // docker container, we forward user defined environment variables
+    // without node manager environment variables.  This is the reason
+    // that we skip sanitizeEnv method.
+    boolean overrideDisable = Boolean.parseBoolean(
+        environment.get(
+            Environment.
+                YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE.
+                    name()));
+    if (overrideDisable) {
+      environment.remove("WORK_DIR");
+      return;
+    }
+
     /**
      * Non-modifiable environment variables
      */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ba9682/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 0bacd03..a14b085 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -235,7 +235,6 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_DELAYED_REMOVAL =
       "YARN_CONTAINER_RUNTIME_DOCKER_DELAYED_REMOVAL";
-
   private Configuration conf;
   private Context nmContext;
   private DockerClient dockerClient;
@@ -741,6 +740,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     String imageName = environment.get(ENV_DOCKER_CONTAINER_IMAGE);
     String network = environment.get(ENV_DOCKER_CONTAINER_NETWORK);
     String hostname = environment.get(ENV_DOCKER_CONTAINER_HOSTNAME);
+    boolean useEntryPoint = Boolean.parseBoolean(environment
+              .get(ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE));
 
     if(network == null || network.isEmpty()) {
       network = defaultNetwork;
@@ -802,8 +803,6 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     @SuppressWarnings("unchecked")
     DockerRunCommand runCommand = new DockerRunCommand(containerIdStr,
         dockerRunAsUser, imageName)
-        .detachOnRun()
-        .setContainerWorkDir(containerWorkDir.toString())
         .setNetworkType(network);
     // Only add hostname if network is not host or if Registry DNS is enabled.
     if (!network.equalsIgnoreCase("host") ||
@@ -875,19 +874,22 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 
     addCGroupParentIfRequired(resourcesOpts, containerIdStr, runCommand);
 
-    String disableOverride = environment.get(
-        ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE);
-
-    if (disableOverride != null && disableOverride.equals("true")) {
-      LOG.info("command override disabled");
+    if (useEntryPoint) {
+      runCommand.setOverrideDisabled(true);
+      runCommand.addEnv(environment);
+      runCommand.setOverrideCommandWithArgs(container.getLaunchContext()
+          .getCommands());
+      runCommand.disableDetach();
+      runCommand.setLogDir(container.getLogDir());
     } else {
       List<String> overrideCommands = new ArrayList<>();
       Path launchDst =
           new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT);
-
       overrideCommands.add("bash");
       overrideCommands.add(launchDst.toUri().getPath());
+      runCommand.setContainerWorkDir(containerWorkDir.toString());
       runCommand.setOverrideCommandWithArgs(overrideCommands);
+      runCommand.detachOnRun();
     }
 
     if(enableUserReMapping) {
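
To summarize the branch above: in ENTRY_POINT mode the image's own entry point is preserved, user-defined environment variables are forwarded, and the container runs in the foreground so its output lands in the log directory; in override mode the command is replaced with the generated launch script and the container detaches. A condensed sketch using only the DockerRunCommand calls visible in this hunk (the container ID, user, image, commands, and paths are illustrative values):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerRunCommand;

    public class RunCommandSketch {
      static DockerRunCommand build(boolean useEntryPoint) {
        DockerRunCommand runCommand = new DockerRunCommand(
            "container_e01_1_01_000002", "nobody", "library/centos:latest");
        if (useEntryPoint) {
          Map<String, String> env = new HashMap<>();
          env.put("MODE", "entry-point");
          runCommand.setOverrideDisabled(true);  // keep the image ENTRY_POINT
          runCommand.addEnv(env);                // forward user-defined env
          runCommand.setOverrideCommandWithArgs(
              Arrays.asList("serve", "--port=8080"));
          runCommand.disableDetach();            // foreground, for log capture
          runCommand.setLogDir("/tmp/logs");
        } else {
          runCommand.setContainerWorkDir("/tmp/work");
          runCommand.setOverrideCommandWithArgs(
              Arrays.asList("bash", "/tmp/work/launch_container.sh"));
          runCommand.detachOnRun();              // detached, script-driven launch
        }
        return runCommand;
      }
    }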

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ba9682/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
index dd49e15..fca707c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
@@ -49,6 +49,7 @@ public final class DockerClient {
        LoggerFactory.getLogger(DockerClient.class);
   private static final String TMP_FILE_PREFIX = "docker.";
   private static final String TMP_FILE_SUFFIX = ".cmd";
+  private static final String TMP_ENV_FILE_SUFFIX = ".env";
   private final String tmpDirPath;
 
   public DockerClient(Configuration conf) throws ContainerExecutionException {
@@ -69,40 +70,56 @@ public final class DockerClient {
 
   public String writeCommandToTempFile(DockerCommand cmd, String filePrefix)
       throws ContainerExecutionException {
-    File dockerCommandFile = null;
     try {
-      dockerCommandFile = File.createTempFile(TMP_FILE_PREFIX + filePrefix,
-          TMP_FILE_SUFFIX, new
-          File(tmpDirPath));
-
-      Writer writer = new OutputStreamWriter(
-          new FileOutputStream(dockerCommandFile), "UTF-8");
-      PrintWriter printWriter = new PrintWriter(writer);
-      printWriter.println("[docker-command-execution]");
-      for (Map.Entry<String, List<String>> entry :
-          cmd.getDockerCommandWithArguments().entrySet()) {
-        if (entry.getKey().contains("=")) {
-          throw new ContainerExecutionException(
-              "'=' found in entry for docker command file, key = " + entry
-                  .getKey() + "; value = " + entry.getValue());
-        }
-        if (entry.getValue().contains("\n")) {
-          throw new ContainerExecutionException(
-              "'\\n' found in entry for docker command file, key = " + entry
-                  .getKey() + "; value = " + entry.getValue());
+      File dockerCommandFile = File.createTempFile(TMP_FILE_PREFIX + filePrefix,
+        TMP_FILE_SUFFIX, new
+        File(tmpDirPath));
+      try (
+        Writer writer = new OutputStreamWriter(
+            new FileOutputStream(dockerCommandFile), "UTF-8");
+        PrintWriter printWriter = new PrintWriter(writer);
+      ) {
+        printWriter.println("[docker-command-execution]");
+        for (Map.Entry<String, List<String>> entry :
+            cmd.getDockerCommandWithArguments().entrySet()) {
+          if (entry.getKey().contains("=")) {
+            throw new ContainerExecutionException(
+                "'=' found in entry for docker command file, key = " + entry
+                    .getKey() + "; value = " + entry.getValue());
+          }
+          if (entry.getValue().contains("\n")) {
+            throw new ContainerExecutionException(
+                "'\\n' found in entry for docker command file, key = " + entry
+                    .getKey() + "; value = " + entry.getValue());
+          }
+          printWriter.println("  " + entry.getKey() + "=" + StringUtils
+              .join(",", entry.getValue()));
         }
-        printWriter.println("  " + entry.getKey() + "=" + StringUtils
-            .join(",", entry.getValue()));
+        return dockerCommandFile.getAbsolutePath();
       }
-      printWriter.close();
-
-      return dockerCommandFile.getAbsolutePath();
     } catch (IOException e) {
       LOG.warn("Unable to write docker command to temporary file!");
       throw new ContainerExecutionException(e);
     }
   }
 
+  private String writeEnvFile(DockerRunCommand cmd, String filePrefix,
+      File cmdDir) throws IOException {
+    File dockerEnvFile = File.createTempFile(TMP_FILE_PREFIX + filePrefix,
+        TMP_ENV_FILE_SUFFIX, cmdDir);
+    try (
+        Writer envWriter = new OutputStreamWriter(
+            new FileOutputStream(dockerEnvFile), "UTF-8");
+        PrintWriter envPrintWriter = new PrintWriter(envWriter);
+    ) {
+      for (Map.Entry<String, String> entry : cmd.getEnv()
+          .entrySet()) {
+        envPrintWriter.println(entry.getKey() + "=" + entry.getValue());
+      }
+      return dockerEnvFile.getAbsolutePath();
+    }
+  }
+
   public String writeCommandToTempFile(DockerCommand cmd,
       ContainerId containerId, Context nmContext)
       throws ContainerExecutionException {
@@ -126,32 +143,38 @@ public final class DockerClient {
         throw new IOException("Cannot create container private directory "
             + cmdDir);
       }
-
       dockerCommandFile = File.createTempFile(TMP_FILE_PREFIX + filePrefix,
           TMP_FILE_SUFFIX, cmdDir);
-
-      Writer writer = new OutputStreamWriter(
-          new FileOutputStream(dockerCommandFile.toString()), "UTF-8");
-      PrintWriter printWriter = new PrintWriter(writer);
-      printWriter.println("[docker-command-execution]");
-      for (Map.Entry<String, List<String>> entry :
-          cmd.getDockerCommandWithArguments().entrySet()) {
-        if (entry.getKey().contains("=")) {
-          throw new ContainerExecutionException(
-              "'=' found in entry for docker command file, key = " + entry
-                  .getKey() + "; value = " + entry.getValue());
+      try (
+        Writer writer = new OutputStreamWriter(
+            new FileOutputStream(dockerCommandFile.toString()), "UTF-8");
+        PrintWriter printWriter = new PrintWriter(writer);
+      ) {
+        printWriter.println("[docker-command-execution]");
+        for (Map.Entry<String, List<String>> entry :
+            cmd.getDockerCommandWithArguments().entrySet()) {
+          if (entry.getKey().contains("=")) {
+            throw new ContainerExecutionException(
+                "'=' found in entry for docker command file, key = " + entry
+                    .getKey() + "; value = " + entry.getValue());
+          }
+          if (entry.getValue().contains("\n")) {
+            throw new ContainerExecutionException(
+                "'\\n' found in entry for docker command file, key = " + entry
+                    .getKey() + "; value = " + entry.getValue());
+          }
+          printWriter.println("  " + entry.getKey() + "=" + StringUtils
+              .join(",", entry.getValue()));
         }
-        if (entry.getValue().contains("\n")) {
-          throw new ContainerExecutionException(
-              "'\\n' found in entry for docker command file, key = " + entry
-                  .getKey() + "; value = " + entry.getValue());
+        if (cmd instanceof DockerRunCommand) {
+          DockerRunCommand runCommand = (DockerRunCommand) cmd;
+          if (runCommand.containsEnv()) {
+            String path = writeEnvFile(runCommand, filePrefix, cmdDir);
+            printWriter.println("  environ=" + path);
+          }
         }
-        printWriter.println("  " + entry.getKey() + "=" + StringUtils
-            .join(",", entry.getValue()));
+        return dockerCommandFile.toString();
       }
-      printWriter.close();
-
-      return dockerCommandFile.toString();
     } catch (IOException e) {
       LOG.warn("Unable to write docker command to " + cmdDir);
       throw new ContainerExecutionException(e);

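For illustration, with writeEnvFile in place a run command file and its companion env file would look roughly like this (paths, ids and values are invented; the section name and the use-entry-point and environ keys match the code above):

    [docker-command-execution]
      docker-command=run
      name=container_e1_12312_11111_02_000001
      user=nobody
      image=hadoop/docker-image
      use-entry-point=true
      environ=/nm-private/docker.run.1234.env

The referenced env file carries one KEY=VALUE pair per line, exactly as printed by writeEnvFile:

    JAVA_HOME=/usr/lib/jvm/default
    HADOOP_CONF_DIR=/etc/hadoop/conf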
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ba9682/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
index bfeeaf5..af16178 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
@@ -21,12 +21,14 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
 
 import java.io.File;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
 public class DockerRunCommand extends DockerCommand {
   private static final String RUN_COMMAND = "run";
+  private final Map<String, String> userEnv;
 
   /** The following are mandatory: */
   public DockerRunCommand(String containerId, String user, String image) {
@@ -34,6 +36,7 @@ public class DockerRunCommand extends DockerCommand {
     super.addCommandArguments("name", containerId);
     super.addCommandArguments("user", user);
     super.addCommandArguments("image", image);
+    this.userEnv = new LinkedHashMap<String, String>();
   }
 
   public DockerRunCommand removeContainerOnExit() {
@@ -174,4 +177,45 @@ public class DockerRunCommand extends DockerCommand {
   public Map<String, List<String>> getDockerCommandWithArguments() {
     return super.getDockerCommandWithArguments();
   }
+
+  public DockerRunCommand setOverrideDisabled(boolean toggle) {
+    String value = Boolean.toString(toggle);
+    super.addCommandArguments("use-entry-point", value);
+    return this;
+  }
+
+  public DockerRunCommand setLogDir(String logDir) {
+    super.addCommandArguments("log-dir", logDir);
+    return this;
+  }
+
+  /**
+   * Check whether any user-defined environment variables are set.
+   *
+   * @return true if at least one user-defined environment variable is set.
+   */
+  public boolean containsEnv() {
+    return !userEnv.isEmpty();
+  }
+
+  /**
+   * Get user defined environment variables.
+   *
+   * @return a map of user defined environment variables
+   */
+  public Map<String, String> getEnv() {
+    return userEnv;
+  }
+
+  /**
+   * Add user defined environment variables.
+   *
+   * @param environment A map of user defined environment variables
+   */
+  public final void addEnv(Map<String, String> environment) {
+    userEnv.putAll(environment);
+  }
 }

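A hedged usage fragment for the DockerRunCommand methods added above (container id, user, image, log path and env values are all invented):

    import java.util.Collections;

    // Sketch only: exercises the new ENTRYPOINT-mode surface.
    DockerRunCommand runCommand =
        new DockerRunCommand("container_01", "nobody", "hadoop/docker-image")
            .setOverrideDisabled(true)   // serialized as use-entry-point=true
            .setLogDir("/var/log/containers/container_01");
    runCommand.addEnv(
        Collections.singletonMap("JAVA_HOME", "/usr/lib/jvm/default"));
    // containsEnv() tells DockerClient whether to emit an env file from
    // getEnv() and reference it through the environ= key.
    assert runCommand.containsEnv();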
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ba9682/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index c5adbe4..7b62223 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -94,6 +94,8 @@ static gid_t nm_gid = -1;
 struct configuration CFG = {.size=0, .sections=NULL};
 struct section executor_cfg = {.size=0, .kv_pairs=NULL};
 
+static char *chosen_container_log_dir = NULL;
+
 char *concatenate(char *concat_pattern, char *return_path_name,
    int numArgs, ...);
 
@@ -755,8 +757,9 @@ static int create_container_directories(const char* user, const char *app_id,
       } else if (mkdirs(container_log_dir, perms) != 0) {
         free(container_log_dir);
       } else {
-        free(container_log_dir);
         result = 0;
+        chosen_container_log_dir = strdup(container_log_dir);
+        free(container_log_dir);
       }
     }
     free(combined_name);
@@ -1129,6 +1132,34 @@ char* get_container_log_directory(const char *log_root, const char* app_id,
                      container_id);
 }
 
+char *init_log_path(const char *container_log_dir, const char *logfile) {
+  char *tmp_buffer = NULL;
+  tmp_buffer = make_string("%s/%s", container_log_dir, logfile);
+
+  mode_t permissions = S_IRUSR | S_IWUSR | S_IRGRP;
+  int fd = open(tmp_buffer, O_CREAT | O_WRONLY, permissions);
+  if (fd >= 0) {
+    close(fd);
+    if (change_owner(tmp_buffer, user_detail->pw_uid, user_detail->pw_gid) != 0) {
+      fprintf(ERRORFILE, "Failed to chown %s to %d:%d: %s\n", tmp_buffer, user_detail->pw_uid, user_detail->pw_gid,
+          strerror(errno));
+      free(tmp_buffer);
+      tmp_buffer = NULL;
+    } else if (chmod(tmp_buffer, permissions) != 0) {
+      fprintf(ERRORFILE, "Can't chmod %s - %s\n",
+              tmp_buffer, strerror(errno));
+      free(tmp_buffer);
+      tmp_buffer = NULL;
+    }
+  } else {
+    fprintf(ERRORFILE, "Failed to create file %s - %s\n", tmp_buffer,
+            strerror(errno));
+    free(tmp_buffer);
+    tmp_buffer = NULL;
+  }
+  return tmp_buffer;
+}
+
 int create_container_log_dirs(const char *container_id, const char *app_id,
                               char * const * log_dirs) {
   char* const* log_root;
@@ -1506,6 +1537,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
   char *docker_inspect_exitcode_command = NULL;
   int container_file_source =-1;
   int cred_file_source = -1;
+  int use_entry_point = 0;
 
   gid_t user_gid = getegid();
   uid_t prev_uid = geteuid();
@@ -1560,6 +1592,18 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
     goto cleanup;
   }
 
+  use_entry_point = get_use_entry_point_flag();
+  char *so = init_log_path(chosen_container_log_dir, "stdout.txt");
+  if (so == NULL) {
+    exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+    goto cleanup;
+  }
+  char *se = init_log_path(chosen_container_log_dir, "stderr.txt");
+  if (se == NULL) {
+    exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+    goto cleanup;
+  }
+
   docker_command_with_binary = flatten(docker_command);
 
   // Launch container
@@ -1573,14 +1617,76 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
   }
 
   if (child_pid == 0) {
+    FILE* so_fd = fopen(so, "a+");
+    if (so_fd == NULL) {
+      fprintf(ERRORFILE, "Could not append to %s\n", so);
+      exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+      goto cleanup;
+    }
+    FILE* se_fd = fopen(se, "a+");
+    if (se_fd == NULL) {
+      fprintf(ERRORFILE, "Could not append to %s\n", se);
+      exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+      fclose(so_fd);
+      goto cleanup;
+    }
+    // if entry point is enabled, clone docker command output
+    // to stdout.txt and stderr.txt for yarn.
+    if (use_entry_point) {
+      fprintf(so_fd, "Launching docker container...\n");
+      fprintf(so_fd, "Docker run command: %s\n", docker_command_with_binary);
+      if (dup2(fileno(so_fd), fileno(stdout)) == -1) {
+        fprintf(ERRORFILE, "Could not append to stdout.txt\n");
+        fclose(so_fd);
+        return UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+      }
+      if (dup2(fileno(se_fd), fileno(stderr)) == -1) {
+        fprintf(ERRORFILE, "Could not append to stderr.txt\n");
+        fclose(se_fd);
+        return UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+      }
+    }
+    fclose(so_fd);
+    fclose(se_fd);
     execvp(docker_binary, docker_command);
     fprintf(ERRORFILE, "failed to execute docker command! error: %s\n", strerror(errno));
     return UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
   } else {
-    exit_code = wait_and_get_exit_code(child_pid);
-    if (exit_code != 0) {
-      exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
-      goto cleanup;
+    if (use_entry_point) {
+      int pid = 0;
+      int res = 0;
+      int count = 0;
+      int max_retries = get_max_retries(&CFG);
+      docker_inspect_command = make_string(
+          "%s inspect --format {{.State.Pid}} %s",
+          docker_binary, container_id);
+      // check for docker container pid
+      while (count < max_retries) {
+        fprintf(LOGFILE, "Inspecting docker container...\n");
+        fprintf(LOGFILE, "Docker inspect command: %s\n", docker_inspect_command);
+        fflush(LOGFILE);
+        FILE* inspect_docker = popen(docker_inspect_command, "r");
+        res = fscanf (inspect_docker, "%d", &pid);
+        fprintf(LOGFILE, "pid from docker inspect: %d\n", pid);
+        if (pclose (inspect_docker) != 0 || res <= 0) {
+          fprintf (ERRORFILE,
+              "Could not inspect docker to get pid %s.\n", docker_inspect_command);
+          fflush(ERRORFILE);
+          exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+        } else {
+          if (pid != 0) {
+            break;
+          }
+        }
+        sleep(3);
+        count++;
+      }
+    } else {
+      exit_code = wait_and_get_exit_code(child_pid);
+      if (exit_code != 0) {
+        exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+        goto cleanup;
+      }
     }
   }
 

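In ENTRYPOINT mode the parent no longer waits for the docker client to exit; it polls for the container PID with a 3 second sleep between attempts, up to the configured maximum number of retries. The polled command has this shape (binary path and container id are illustrative; the format string is the one built by make_string above):

    /usr/bin/docker inspect --format {{.State.Pid}} container_e1_12312_11111_02_000001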
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ba9682/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
index 5be02a9..f361d34 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -32,6 +32,8 @@
 #include <pwd.h>
 #include <errno.h>
 
+int entry_point = 0;
+
 static int read_and_verify_command_file(const char *command_file, const char *docker_command,
                                         struct configuration *command_config) {
   int ret = 0;
@@ -336,6 +338,17 @@ const char *get_docker_error_message(const int error_code) {
   }
 }
 
+int get_max_retries(const struct configuration *conf) {
+  int retries = 10;
+  char *max_retries = get_configuration_value(DOCKER_INSPECT_MAX_RETRIES_KEY,
+      CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, conf);
+  if (max_retries != NULL) {
+    retries = atoi(max_retries);
+    free(max_retries);
+  }
+  return retries;
+}
+
 char *get_docker_binary(const struct configuration *conf) {
   char *docker_binary = NULL;
   docker_binary = get_configuration_value(DOCKER_BINARY_KEY, CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, conf);
@@ -348,6 +361,10 @@ char *get_docker_binary(const struct configuration *conf) {
   return docker_binary;
 }
 
+int get_use_entry_point_flag() {
+  return entry_point;
+}
+
 int docker_module_enabled(const struct configuration *conf) {
   struct section *section = get_configuration_section(CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, conf);
   if (section != NULL) {
@@ -365,6 +382,12 @@ int get_docker_command(const char *command_file, const struct configuration *con
     return INVALID_COMMAND_FILE;
   }
 
+  char *value = get_configuration_value("use-entry-point", DOCKER_COMMAND_FILE_SECTION, &command_config);
+  if (value != NULL && strcasecmp(value, "true") == 0) {
+    entry_point = 1;
+  }
+  free(value);
+
   char *command = get_configuration_value("docker-command", DOCKER_COMMAND_FILE_SECTION, &command_config);
   if (strcmp(DOCKER_INSPECT_COMMAND, command) == 0) {
     return get_docker_inspect_command(command_file, conf, args);
@@ -1009,6 +1032,24 @@ static int set_devices(const struct configuration *command_config, const struct
   return ret;
 }
 
+static int set_env(const struct configuration *command_config, struct args *args) {
+  int ret = 0;
+  // Use envfile method.
+  char *envfile = get_configuration_value("environ", DOCKER_COMMAND_FILE_SECTION, command_config);
+  if (envfile != NULL) {
+    ret = add_to_args(args, "--env-file");
+    if (ret != 0) {
+      ret = BUFFER_TOO_SMALL;
+    }
+    ret = add_to_args(args, envfile);
+    if (ret != 0) {
+      ret = BUFFER_TOO_SMALL;
+    }
+    free(envfile);
+  }
+  return ret;
+}
+
 /**
  * Helper function to help normalize mounts for checking if mounts are
  * permitted. The function does the following -
@@ -1520,6 +1561,11 @@ int get_docker_run_command(const char *command_file, const struct configuration
     return ret;
   }
 
+  ret = set_env(&command_config, args);
+  if (ret != 0) {
+    return BUFFER_TOO_SMALL;
+  }
+
   ret = add_to_args(args, image);
   if (ret != 0) {
     reset_args(args);

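Once set_env appends --env-file, a run command file carrying an environ= entry expands to a command line of this shape (the same expansion is asserted by the new unit test further below):

    /usr/bin/docker run --name=container_e1_12312_11111_02_000001 --user=nobody \
        --cap-drop=ALL --env-file /tmp/test.env hadoop/docker-image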
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ba9682/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
index 330d722..864acd9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
@@ -23,6 +23,7 @@
 
 #define CONTAINER_EXECUTOR_CFG_DOCKER_SECTION "docker"
 #define DOCKER_BINARY_KEY "docker.binary"
+#define DOCKER_INSPECT_MAX_RETRIES_KEY "docker.inspect.max.retries"
 #define DOCKER_COMMAND_FILE_SECTION "docker-command-execution"
 #define DOCKER_INSPECT_COMMAND "inspect"
 #define DOCKER_LOAD_COMMAND "load"
@@ -86,6 +87,12 @@ char *get_docker_binary(const struct configuration *conf);
 int get_docker_command(const char* command_file, const struct configuration* conf, args *args);
 
 /**
+ * Check if use-entry-point flag is set.
+ * @return 1 when the use-entry-point flag is set, 0 otherwise.
+ */
+int get_use_entry_point_flag();
+
+/**
  * Get the Docker inspect command line string. The function will verify that the params file is meant for the
  * inspect command.
  * @param command_file File containing the params for the Docker inspect command
@@ -202,4 +209,11 @@ void reset_args(args *args);
  * @param args Pointer reference to args data structure
  */
 char** extract_execv_args(args *args);
+
+/**
+ * Get max retries for docker inspect.
+ * @param conf Configuration structure
+ * @return value of max retries
+ */
+int get_max_retries(const struct configuration *conf);
 #endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ba9682/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
index 3746fa1..1fa425c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
@@ -1312,6 +1312,48 @@ namespace ContainerExecutor {
     run_docker_command_test(file_cmd_vec, bad_file_cmd_vec, get_docker_run_command);
   }
 
+  TEST_F(TestDockerUtil, test_docker_run_entry_point) {
+
+    std::string container_executor_contents = "[docker]\n"
+        "  docker.allowed.ro-mounts=/var,/etc,/usr/bin/cut\n"
+        "  docker.allowed.rw-mounts=/tmp\n  docker.allowed.networks=bridge\n "
+        "  docker.privileged-containers.enabled=1\n  docker.allowed.capabilities=CHOWN,SETUID\n"
+        "  docker.allowed.devices=/dev/test\n  docker.privileged-containers.registries=hadoop\n";
+    write_file(container_executor_cfg_file, container_executor_contents);
+    int ret = read_config(container_executor_cfg_file.c_str(), &container_executor_cfg);
+    if (ret != 0) {
+      FAIL();
+    }
+    ret = create_ce_file();
+    if (ret != 0) {
+      std::cerr << "Could not create ce file, skipping test" << std::endl;
+      return;
+    }
+
+    std::vector<std::pair<std::string, std::string> > file_cmd_vec;
+    file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
+        "[docker-command-execution]\n"
+        "  docker-command=run\n"
+        "  name=container_e1_12312_11111_02_000001\n"
+        "  image=hadoop/docker-image\n"
+        "  user=nobody\n"
+        "  use-entry-point=true\n"
+        "  environ=/tmp/test.env\n",
+        "/usr/bin/docker run --name=container_e1_12312_11111_02_000001 --user=nobody --cap-drop=ALL "
+        "--env-file /tmp/test.env hadoop/docker-image"));
+
+    std::vector<std::pair<std::string, int> > bad_file_cmd_vec;
+
+    bad_file_cmd_vec.push_back(std::make_pair<std::string, int>(
+        "[docker-command-execution]\n"
+        "  docker-command=run\n"
+        "  image=hadoop/docker-image\n"
+        "  user=nobody",
+        static_cast<int>(INVALID_DOCKER_CONTAINER_NAME)));
+
+    run_docker_command_test(file_cmd_vec, bad_file_cmd_vec, get_docker_run_command);
+  }
+
   TEST_F(TestDockerUtil, test_docker_run_no_privileged) {
 
     std::string container_executor_contents[] = {"[docker]\n  docker.allowed.ro-mounts=/var,/etc,/usr/bin/cut\n"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ba9682/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index 2efba3b..423f1da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -207,6 +207,7 @@ are allowed. It contains the following properties:
 | `docker.host-pid-namespace.enabled` | Set to "true" or "false" to enable or disable using the host's PID namespace. Default value is "false". |
 | `docker.privileged-containers.enabled` | Set to "true" or "false" to enable or disable launching privileged containers. Default value is "false". |
 | `docker.privileged-containers.registries` | Comma separated list of trusted docker registries for running trusted privileged docker containers.  By default, no registries are defined. |
+| `docker.inspect.max.retries` | Integer value controlling how many times to poll a Docker container for readiness. Each inspection is performed with a 3 second delay. With the default value of 10, container-executor waits up to 30 seconds for the Docker container to become ready before marking it as failed. |
 
 Please note that if you wish to run Docker containers that require access to the YARN local directories, you must add them to the docker.allowed.rw-mounts list.
 
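For reference, the new key lives in the [docker] section of container-executor.cfg next to the existing docker properties; a minimal illustrative fragment (values are examples only):

    [docker]
      docker.binary=/usr/bin/docker
      docker.inspect.max.retries=10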




[29/50] [abbrv] hadoop git commit: HDDS-25. Simple async event processing for SCM. Contributed by Elek, Marton.

HDDS-25. Simple async event processing for SCM.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4ef3514
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4ef3514
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4ef3514

Branch: refs/heads/HDDS-4
Commit: a4ef3514621304777bee607114b35e2b8553a53a
Parents: a95bd94
Author: Anu Engineer <ae...@apache.org>
Authored: Fri May 11 11:35:21 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdds/server/events/Event.java |  42 ++++
 .../hdds/server/events/EventExecutor.java       |  68 ++++++
 .../hadoop/hdds/server/events/EventHandler.java |  33 +++
 .../hdds/server/events/EventPublisher.java      |  28 +++
 .../hadoop/hdds/server/events/EventQueue.java   | 213 +++++++++++++++++++
 .../server/events/SingleThreadExecutor.java     | 103 +++++++++
 .../hadoop/hdds/server/events/TypedEvent.java   |  51 +++++
 .../hadoop/hdds/server/events/package-info.java |  23 ++
 .../hdds/server/events/TestEventQueue.java      | 113 ++++++++++
 .../hdds/server/events/TestEventQueueChain.java |  79 +++++++
 10 files changed, 753 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4ef3514/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java
new file mode 100644
index 0000000..810c8b3
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+/**
+ * Identifier of an async event.
+ *
+ * @param <PAYLOAD> The message payload type of this event.
+ */
+public interface Event<PAYLOAD> {
+
+  /**
+   * The type of the event payload. Payload contains all the required data
+   * to process the event.
+   *
+   */
+  Class<PAYLOAD> getPayloadType();
+
+  /**
+   * The human-readable name of the event.
+   *
+   * Used for display in thread names and monitoring.
+   */
+  String getName();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4ef3514/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java
new file mode 100644
index 0000000..4257839
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+/**
+ * Executors define how an EventHandler should be called.
+ * <p>
+ * Executors are used only by the EventQueue and provide the thread separation
+ * between the caller and the EventHandler.
+ * <p>
+ * Executors should guarantee that only one thread is executing one
+ * EventHandler at the same time.
+ *
+ * @param <PAYLOAD> the payload type of the event.
+ */
+public interface EventExecutor<PAYLOAD> extends AutoCloseable {
+
+  /**
+   * Process an event payload.
+   *
+   * @param handler      the handler to process the payload
+   * @param eventPayload to be processed.
+   * @param publisher    to send response/other message forward to the chain.
+   */
+  void onMessage(EventHandler<PAYLOAD> handler,
+      PAYLOAD eventPayload,
+      EventPublisher
+          publisher);
+
+  /**
+   * Return the number of failed events.
+   */
+  long failedEvents();
+
+
+  /**
+   * Return the number of successfully processed events.
+   */
+  long successfulEvents();
+
+  /**
+   * Return the total number of events queued so far.
+   */
+  long queuedEvents();
+
+  /**
+   * The human-readable name for the event executor.
+   * <p>
+   * Used in monitoring and logging.
+   *
+   */
+  String getName();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4ef3514/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java
new file mode 100644
index 0000000..f40fc9e
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+/**
+ * Processor that reacts to an event.
+ *
+ * EventExecutors should guarantee that the implementations are called only
+ * from one thread.
+ *
+ * @param <PAYLOAD>
+ */
+@FunctionalInterface
+public interface EventHandler<PAYLOAD> {
+
+  void onMessage(PAYLOAD payload, EventPublisher publisher);
+
+}

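Since EventHandler is a @FunctionalInterface, handlers can be supplied as plain lambdas, as the tests later in this commit do; for example (payload type and message are invented):

    EventHandler<Long> handler =
        (payload, publisher) -> System.out.println("received " + payload);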
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4ef3514/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java
new file mode 100644
index 0000000..a47fb57
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+/**
+ * Client interface to send a new event.
+ */
+public interface EventPublisher {
+
+  <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void
+      fireEvent(EVENT_TYPE event, PAYLOAD payload);
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4ef3514/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
new file mode 100644
index 0000000..44d85f5
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+/**
+ * Simple async event processing utility.
+ * <p>
+ * Event queue handles a collection of event handlers and routes the incoming
+ * events to one (or more) event handler.
+ */
+public class EventQueue implements EventPublisher, AutoCloseable {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(EventQueue.class);
+
+  private final Map<Event, Map<EventExecutor, List<EventHandler>>> executors =
+      new HashMap<>();
+
+  private final AtomicLong queuedCount = new AtomicLong(0);
+
+  private final AtomicLong eventCount = new AtomicLong(0);
+
+  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
+      EVENT_TYPE event, EventHandler<PAYLOAD> handler) {
+
+    this.addHandler(event, new SingleThreadExecutor<>(
+        event.getName()), handler);
+  }
+
+  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
+      EVENT_TYPE event,
+      EventExecutor<PAYLOAD> executor,
+      EventHandler<PAYLOAD> handler) {
+
+    executors.putIfAbsent(event, new HashMap<>());
+    executors.get(event).putIfAbsent(executor, new ArrayList<>());
+
+    executors.get(event)
+        .get(executor)
+        .add(handler);
+  }
+
+  /**
+   * Creates one executor with multiple event handlers.
+   */
+  public void addHandlerGroup(String name, HandlerForEvent<?>...
+      eventsAndHandlers) {
+    SingleThreadExecutor sharedExecutor =
+        new SingleThreadExecutor(name);
+    for (HandlerForEvent handlerForEvent : eventsAndHandlers) {
+      addHandler(handlerForEvent.event, sharedExecutor,
+          handlerForEvent.handler);
+    }
+
+  }
+
+  /**
+   * Route an event with payload to the right listener(s).
+   *
+   * @param event   The event identifier
+   * @param payload The payload of the event.
+   * @throws IllegalArgumentException If there is no EventHandler for
+   *                                  the specific event.
+   */
+  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
+      EVENT_TYPE event, PAYLOAD payload) {
+
+    Map<EventExecutor, List<EventHandler>> eventExecutorListMap =
+        this.executors.get(event);
+
+    eventCount.incrementAndGet();
+    if (eventExecutorListMap != null) {
+
+      for (Map.Entry<EventExecutor, List<EventHandler>> executorAndHandlers :
+          eventExecutorListMap.entrySet()) {
+
+        for (EventHandler handler : executorAndHandlers.getValue()) {
+          queuedCount.incrementAndGet();
+
+          executorAndHandlers.getKey()
+              .onMessage(handler, payload, this);
+
+        }
+      }
+
+    } else {
+      throw new IllegalArgumentException(
+          "No event handler registered for event " + event);
+    }
+
+  }
+
+  /**
+   * This is just for unit testing, don't use it for production code.
+   * <p>
+   * It waits for all messages to be processed. If one event handler invokes
+   * another one, the latter must also finish.
+   * <p>
+   * Long counter overflow is not handled, therefore it's safe only for unit
+   * testing.
+   * <p>
+   * This method is only eventually consistent. In some cases it could return
+   * even though there are new messages in some of the handlers. But in the
+   * simple case (one message) it returns only once the message is processed
+   * and all the dependent messages (messages sent by the current handlers)
+   * are processed.
+   *
+   * @param timeout Timeout in milliseconds to wait for the processing.
+   */
+  @VisibleForTesting
+  public void processAll(long timeout) {
+    long currentTime = Time.now();
+    while (true) {
+
+      Stream<EventExecutor> allExecutor = this.executors.values().stream()
+          .flatMap(handlerMap -> handlerMap.keySet().stream());
+
+      boolean allIdle =
+          allExecutor.allMatch(executor -> executor.queuedEvents() == executor
+              .successfulEvents() + executor.failedEvents());
+
+      if (allIdle) {
+        return;
+      }
+
+      try {
+        Thread.sleep(100);
+      } catch (InterruptedException e) {
+        e.printStackTrace();
+      }
+
+      if (Time.now() > currentTime + timeout) {
+        throw new AssertionError(
+            "Messages are not processed in the given timeframe. Queued: "
+                + queuedCount.get() + " Processed: " + processed);
+      }
+    }
+  }
+
+  public void close() {
+
+    Set<EventExecutor> allExecutors = this.executors.values().stream()
+        .flatMap(handlerMap -> handlerMap.keySet().stream())
+        .collect(Collectors.toSet());
+
+    allExecutors.forEach(executor -> {
+      try {
+        executor.close();
+      } catch (Exception ex) {
+        LOG.error("Can't close the executor " + executor.getName(), ex);
+      }
+    });
+  }
+
+  /**
+   * Event identifier together with the handler.
+   *
+   * @param <PAYLOAD>
+   */
+  public static class HandlerForEvent<PAYLOAD> {
+
+    private final Event<PAYLOAD> event;
+
+    private final EventHandler<PAYLOAD> handler;
+
+    public HandlerForEvent(
+        Event<PAYLOAD> event,
+        EventHandler<PAYLOAD> handler) {
+      this.event = event;
+      this.handler = handler;
+    }
+
+    public Event<PAYLOAD> getEvent() {
+      return event;
+    }
+
+    public EventHandler<PAYLOAD> getHandler() {
+      return handler;
+    }
+  }
+
+}

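Putting the pieces together, a minimal end-to-end sketch of the API added in this commit (event name and payload type are invented; processAll is a test-only helper, as its javadoc notes):

    import org.apache.hadoop.hdds.server.events.EventQueue;
    import org.apache.hadoop.hdds.server.events.TypedEvent;

    public class EventQueueSketch {
      private static final TypedEvent<String> NODE_FAILED =
          new TypedEvent<>(String.class, "NODE_FAILED");

      public static void main(String[] args) {
        EventQueue queue = new EventQueue();
        // The handler runs on a dedicated "EventQueue-NODE_FAILED" thread.
        queue.addHandler(NODE_FAILED,
            (nodeId, publisher) -> System.out.println("handling " + nodeId));
        queue.fireEvent(NODE_FAILED, "node1");
        queue.processAll(1000);  // wait up to 1000 ms; unit tests only
        queue.close();
      }
    }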
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4ef3514/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
new file mode 100644
index 0000000..a64e3d7
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Simple EventExecutor to call all the event handlers one by one.
+ *
+ * @param <T>
+ */
+public class SingleThreadExecutor<T> implements EventExecutor<T> {
+
+  public static final String THREAD_NAME_PREFIX = "EventQueue";
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SingleThreadExecutor.class);
+
+  private final String name;
+
+  private final ThreadPoolExecutor executor;
+
+  private final AtomicLong queuedCount = new AtomicLong(0);
+
+  private final AtomicLong successfulCount = new AtomicLong(0);
+
+  private final AtomicLong failedCount = new AtomicLong(0);
+
+  public SingleThreadExecutor(String name) {
+    this.name = name;
+
+    LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>();
+    executor =
+        new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, workQueue,
+            runnable -> {
+              Thread thread = new Thread(runnable);
+              thread.setName(THREAD_NAME_PREFIX + "-" + name);
+              return thread;
+            });
+
+  }
+
+  @Override
+  public void onMessage(EventHandler<T> handler, T message, EventPublisher
+      publisher) {
+    queuedCount.incrementAndGet();
+    executor.execute(() -> {
+      try {
+        handler.onMessage(message, publisher);
+        successfulCount.incrementAndGet();
+      } catch (Exception ex) {
+        LOG.error("Error on execution message {}", message, ex);
+        failedCount.incrementAndGet();
+      }
+    });
+  }
+
+  @Override
+  public long failedEvents() {
+    return failedCount.get();
+  }
+
+  @Override
+  public long successfulEvents() {
+    return successfulCount.get();
+  }
+
+  @Override
+  public long queuedEvents() {
+    return queuedCount.get();
+  }
+
+  @Override
+  public void close() {
+    executor.shutdown();
+  }
+
+  @Override
+  public String getName() {
+    return name;
+  }
+}

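Handlers registered through the two-argument addHandler each get a dedicated SingleThreadExecutor named EventQueue-<event name>. To share one thread across several handlers, pass the executor explicitly (or use addHandlerGroup); a hedged fragment, assuming queue, EVENT1 and EVENT2 are declared as in the tests below:

    // One shared "EventQueue-shared" thread serves both handlers.
    SingleThreadExecutor<Long> shared = new SingleThreadExecutor<>("shared");
    queue.addHandler(EVENT1, shared, (payload, publisher) -> { /* react */ });
    queue.addHandler(EVENT2, shared, (payload, publisher) -> { /* react */ });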
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4ef3514/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
new file mode 100644
index 0000000..c2159ad
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+/**
+ * Basic event implementation to implement custom events.
+ *
+ * @param <T>
+ */
+public class TypedEvent<T> implements Event<T> {
+
+  private final Class<T> payloadType;
+
+  private final String name;
+
+  public TypedEvent(Class<T> payloadType, String name) {
+    this.payloadType = payloadType;
+    this.name = name;
+  }
+
+  public TypedEvent(Class<T> payloadType) {
+    this.payloadType = payloadType;
+    this.name = payloadType.getSimpleName();
+  }
+
+  @Override
+  public Class<T> getPayloadType() {
+    return payloadType;
+  }
+
+  @Override
+  public String getName() {
+    return name;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4ef3514/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java
new file mode 100644
index 0000000..89999ee
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.server.events;
+
+/**
+ * Simple event queue implementation for hdds/ozone components.
+ */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4ef3514/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
new file mode 100644
index 0000000..3944409
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Testing the basic functionality of the event queue.
+ */
+public class TestEventQueue {
+
+  private static final Event<Long> EVENT1 =
+      new TypedEvent<>(Long.class, "SCM_EVENT1");
+  private static final Event<Long> EVENT2 =
+      new TypedEvent<>(Long.class, "SCM_EVENT2");
+
+  private static final Event<Long> EVENT3 =
+      new TypedEvent<>(Long.class, "SCM_EVENT3");
+  private static final Event<Long> EVENT4 =
+      new TypedEvent<>(Long.class, "SCM_EVENT4");
+
+  private EventQueue queue;
+
+  @Before
+  public void startEventQueue() {
+    queue = new EventQueue();
+  }
+
+  @After
+  public void stopEventQueue() {
+    queue.close();
+  }
+
+  @Test
+  public void simpleEvent() {
+
+    final long[] result = new long[2];
+
+    queue.addHandler(EVENT1, (payload, publisher) -> result[0] = payload);
+
+    queue.fireEvent(EVENT1, 11L);
+    queue.processAll(1000);
+    Assert.assertEquals(11, result[0]);
+
+  }
+
+  @Test
+  public void multipleSubscriber() {
+    final long[] result = new long[2];
+    queue.addHandler(EVENT2, (payload, publisher) -> result[0] = payload);
+
+    queue.addHandler(EVENT2, (payload, publisher) -> result[1] = payload);
+
+    queue.fireEvent(EVENT2, 23L);
+    queue.processAll(1000);
+    Assert.assertEquals(23, result[0]);
+    Assert.assertEquals(23, result[1]);
+
+  }
+
+  @Test
+  public void handlerGroup() {
+    final long[] result = new long[2];
+    queue.addHandlerGroup(
+        "group",
+        new EventQueue.HandlerForEvent<>(EVENT3, (payload, publisher) ->
+            result[0] = payload),
+        new EventQueue.HandlerForEvent<>(EVENT4, (payload, publisher) ->
+            result[1] = payload)
+    );
+
+    queue.fireEvent(EVENT3, 23L);
+    queue.fireEvent(EVENT4, 42L);
+
+    queue.processAll(1000);
+
+    Assert.assertEquals(23, result[0]);
+    Assert.assertEquals(42, result[1]);
+
+    Set<String> eventQueueThreadNames =
+        Thread.getAllStackTraces().keySet()
+            .stream()
+            .filter(t -> t.getName().startsWith(SingleThreadExecutor
+                .THREAD_NAME_PREFIX))
+            .map(Thread::getName)
+            .collect(Collectors.toSet());
+    System.out.println(eventQueueThreadNames);
+    Assert.assertEquals(1, eventQueueThreadNames.size());
+
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4ef3514/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
new file mode 100644
index 0000000..bb05ef4
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import org.junit.Test;
+
+/**
+ * A more realistic test in which one handler fires a follow-up event.
+ */
+public class TestEventQueueChain {
+
+  private static final Event<FailedNode> DECOMMISSION =
+      new TypedEvent<>(FailedNode.class);
+
+  private static final Event<FailedNode> DECOMMISSION_START =
+      new TypedEvent<>(FailedNode.class);
+
+  @Test
+  public void simpleEvent() {
+    EventQueue queue = new EventQueue();
+
+    queue.addHandler(DECOMMISSION, new PipelineManager());
+    queue.addHandler(DECOMMISSION_START, new NodeWatcher());
+
+    queue.fireEvent(DECOMMISSION, new FailedNode("node1"));
+
+    queue.processAll(5000);
+  }
+
+
+  static class FailedNode {
+    private final String nodeId;
+
+    FailedNode(String nodeId) {
+      this.nodeId = nodeId;
+    }
+
+    String getNodeId() {
+      return nodeId;
+    }
+  }
+
+  private static class PipelineManager implements EventHandler<FailedNode> {
+
+    @Override
+    public void onMessage(FailedNode message, EventPublisher publisher) {
+
+      System.out.println(
+          "Closing pipelines for all pipelines including node: " + message
+              .getNodeId());
+
+      publisher.fireEvent(DECOMMISSION_START, message);
+    }
+
+  }
+
+  private static class NodeWatcher implements EventHandler<FailedNode> {
+
+    @Override
+    public void onMessage(FailedNode message, EventPublisher publisher) {
+      System.out.println("Clear timer");
+    }
+  }
+}
\ No newline at end of file
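
The chain test above carries no assertions; it demonstrates that processAll
also drains events fired from inside a handler. A hedged sketch of the same
two-step chain with explicit verification, using only the API the tests
exercise (the event names and the counter are illustrative):

    import java.util.concurrent.atomic.AtomicInteger;

    import org.apache.hadoop.hdds.server.events.Event;
    import org.apache.hadoop.hdds.server.events.EventQueue;
    import org.apache.hadoop.hdds.server.events.TypedEvent;

    public final class EventChainDemo {
      public static void main(String[] args) {
        EventQueue queue = new EventQueue();
        Event<String> first = new TypedEvent<>(String.class, "FIRST");
        Event<String> second = new TypedEvent<>(String.class, "SECOND");
        AtomicInteger steps = new AtomicInteger();

        // The first handler republishes, as PipelineManager does above.
        queue.addHandler(first, (nodeId, publisher) -> {
          steps.incrementAndGet();
          publisher.fireEvent(second, nodeId);
        });
        queue.addHandler(second, (nodeId, publisher) ->
            steps.incrementAndGet());

        queue.fireEvent(first, "node1");
        queue.processAll(5000);   // drains the follow-up event as well
        if (steps.get() != 2) {
          throw new AssertionError("chain incomplete: " + steps.get());
        }
        queue.close();
      }
    }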




[09/50] [abbrv] hadoop git commit: MAPREDUCE-7095. Race conditions in closing FadvisedChunkedFile. (Miklos Szegedi via Haibo Chen)

Posted by xy...@apache.org.
MAPREDUCE-7095. Race conditions in closing FadvisedChunkedFile. (Miklos Szegedi via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee7daf0a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee7daf0a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee7daf0a

Branch: refs/heads/HDDS-4
Commit: ee7daf0a9d43206304f128c477c8dcd2e73c1e35
Parents: 9b9e145
Author: Haibo Chen <ha...@apache.org>
Authored: Thu May 10 10:42:26 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:08 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/io/ReadaheadPool.java     |  6 ++-
 .../hadoop/mapred/FadvisedChunkedFile.java      | 57 +++++++++++++-------
 .../hadoop/mapred/TestFadvisedChunkedFile.java  | 55 +++++++++++++++++++
 3 files changed, 98 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7daf0a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
index 2e65f12..7cd7f98 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
@@ -205,8 +205,10 @@ public class ReadaheadPool {
       // It's also possible that we'll end up requesting readahead on some
       // other FD, which may be wasted work, but won't cause a problem.
       try {
-        NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(identifier,
-            fd, off, len, POSIX_FADV_WILLNEED);
+        if (fd.valid()) {
+          NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
+              identifier, fd, off, len, POSIX_FADV_WILLNEED);
+        }
       } catch (IOException ioe) {
         if (canceled) {
           // no big deal - the reader canceled the request and closed

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7daf0a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java
index 6a4e3b4..e9f0f34 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java
@@ -22,6 +22,7 @@ import java.io.FileDescriptor;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.io.ReadaheadPool;
 import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -37,13 +38,14 @@ public class FadvisedChunkedFile extends ChunkedFile {
   private static final Logger LOG =
       LoggerFactory.getLogger(FadvisedChunkedFile.class);
 
+  private final Object closeLock = new Object();
   private final boolean manageOsCache;
   private final int readaheadLength;
   private final ReadaheadPool readaheadPool;
   private final FileDescriptor fd;
   private final String identifier;
 
-  private ReadaheadRequest readaheadRequest;
+  private volatile ReadaheadRequest readaheadRequest;
 
   public FadvisedChunkedFile(RandomAccessFile file, long position, long count,
       int chunkSize, boolean manageOsCache, int readaheadLength,
@@ -56,31 +58,50 @@ public class FadvisedChunkedFile extends ChunkedFile {
     this.identifier = identifier;
   }
 
+  @VisibleForTesting
+  FileDescriptor getFd() {
+    return fd;
+  }
+
   @Override
   public Object nextChunk() throws Exception {
-    if (manageOsCache && readaheadPool != null) {
-      readaheadRequest = readaheadPool
-          .readaheadStream(identifier, fd, getCurrentOffset(), readaheadLength,
-              getEndOffset(), readaheadRequest);
+    synchronized (closeLock) {
+      if (fd.valid()) {
+        if (manageOsCache && readaheadPool != null) {
+          readaheadRequest = readaheadPool
+              .readaheadStream(
+                  identifier, fd, getCurrentOffset(), readaheadLength,
+                  getEndOffset(), readaheadRequest);
+        }
+        return super.nextChunk();
+      } else {
+        return null;
+      }
     }
-    return super.nextChunk();
   }
 
   @Override
   public void close() throws Exception {
-    if (readaheadRequest != null) {
-      readaheadRequest.cancel();
-    }
-    if (manageOsCache && getEndOffset() - getStartOffset() > 0) {
-      try {
-        NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(identifier,
-            fd,
-            getStartOffset(), getEndOffset() - getStartOffset(),
-            POSIX_FADV_DONTNEED);
-      } catch (Throwable t) {
-        LOG.warn("Failed to manage OS cache for " + identifier, t);
+    synchronized (closeLock) {
+      if (readaheadRequest != null) {
+        readaheadRequest.cancel();
+        readaheadRequest = null;
+      }
+      if (fd.valid() &&
+          manageOsCache && getEndOffset() - getStartOffset() > 0) {
+        try {
+          NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
+              identifier,
+              fd,
+              getStartOffset(), getEndOffset() - getStartOffset(),
+              POSIX_FADV_DONTNEED);
+        } catch (Throwable t) {
+          LOG.warn("Failed to manage OS cache for " + identifier +
+              " fd " + fd.toString(), t);
+        }
       }
+      // fd becomes invalid upon closing
+      super.close();
     }
-    super.close();
   }
 }
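
The idiom behind both hunks is the same: every I/O path and close() serialize
on one private lock, fd.valid() doubles as the "not yet closed" flag (the
ReadaheadPool hunk above applies the same check), and the cancellable
readahead request is nulled so a repeated close() is a no-op, which the new
test below asserts. A distilled sketch of that pattern with stand-in types,
not the real ChunkedFile classes:

    import java.io.FileDescriptor;

    class GuardedChunkSource {
      interface Cancellable { void cancel(); }

      private final Object closeLock = new Object();
      private final FileDescriptor fd;
      // Set when readahead is scheduled (omitted in this sketch).
      private volatile Cancellable readaheadRequest;

      GuardedChunkSource(FileDescriptor fd) {
        this.fd = fd;
      }

      Object nextChunk() {
        synchronized (closeLock) {
          if (!fd.valid()) {
            return null;             // lost the race with close(): stop here
          }
          // ... schedule readahead, then return the next chunk ...
          return new Object();
        }
      }

      void close() {
        synchronized (closeLock) {
          if (readaheadRequest != null) {
            readaheadRequest.cancel();
            readaheadRequest = null; // a second close() skips straight through
          }
          if (fd.valid()) {
            // ... issue the fadvise call, then close the underlying file ...
          }
        }
      }
    }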

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7daf0a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestFadvisedChunkedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestFadvisedChunkedFile.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestFadvisedChunkedFile.java
new file mode 100644
index 0000000..b6d0fd2
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestFadvisedChunkedFile.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import org.junit.Test;
+
+import java.io.File;
+import java.io.RandomAccessFile;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Unit test for FadvisedChunkedFile.
+ */
+public class TestFadvisedChunkedFile {
+
+  @Test
+  public void testDoubleClose() throws Exception {
+    File absoluteFile = new File("target",
+        TestFadvisedChunkedFile.class.getSimpleName()).getAbsoluteFile();
+    absoluteFile.deleteOnExit();
+    try {
+      try (RandomAccessFile f = new RandomAccessFile(
+          absoluteFile.getAbsolutePath(), "rw")) {
+        FadvisedChunkedFile af = new FadvisedChunkedFile(
+            f, 0, 5, 2, true,
+            10, null, "foo");
+
+        assertTrue("fd not valid", f.getFD().valid());
+        af.close();
+        assertFalse("fd still valid", f.getFD().valid());
+        af.close();
+        assertFalse("fd still valid", f.getFD().valid());
+      }
+    } finally {
+      absoluteFile.delete();
+    }
+  }
+}




[40/50] [abbrv] hadoop git commit: HDDS-51. Fix TestDeletedBlockLog#testDeletedBlockTransactions. Contributed by Mukul Kumar Singh.

Posted by xy...@apache.org.
HDDS-51. Fix TestDeletedBlockLog#testDeletedBlockTransactions.
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05d04b58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05d04b58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05d04b58

Branch: refs/heads/HDDS-4
Commit: 05d04b586f92ec63fd389269a2a6acc1ec69211d
Parents: 7befa84
Author: Anu Engineer <ae...@apache.org>
Authored: Sat May 12 13:37:34 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05d04b58/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index f872e23..8c12806 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdds.scm.block;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
 import org.apache.hadoop.hdds.scm.container.Mapping;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
@@ -88,12 +89,10 @@ public class TestDeletedBlockLog {
     int containerIDBase = random.nextInt(100);
     int localIDBase = random.nextInt(1000);
     for (int i = 0; i < dataSize; i++) {
-      //String containerName = "container-" + UUID.randomUUID().toString();
       long containerID = containerIDBase + i;
       List<Long> blocks = new ArrayList<>();
       int blockSize = random.nextInt(30) + 1;
       for (int j = 0; j < blockSize; j++)  {
-        //blocks.add("block-" + UUID.randomUUID().toString());
         long localID = localIDBase + j;
         blocks.add(localID);
       }
@@ -266,7 +265,7 @@ public class TestDeletedBlockLog {
 
     int count = 0;
     long containerID = 0L;
-    DatanodeDetails dnDd1 = DatanodeDetails.newBuilder()
+    DatanodeDetails dnId1 = DatanodeDetails.newBuilder()
         .setUuid(UUID.randomUUID().toString())
         .setIpAddress("127.0.0.1")
         .setHostName("localhost")
@@ -293,7 +292,7 @@ public class TestDeletedBlockLog {
 
       // make TX[1-6] for datanode1; TX[7-10] for datanode2
       if (count <= (maximumAllowedTXNum + 1)) {
-        mockContainerInfo(mappingService, containerID, dnDd1);
+        mockContainerInfo(mappingService, containerID, dnId1);
       } else {
         mockContainerInfo(mappingService, containerID, dnId2);
       }
@@ -323,7 +322,7 @@ public class TestDeletedBlockLog {
     Assert.assertFalse(transactions.isFull());
     // The number of TXs for dnId1 won't exceed the maximum value.
     Assert.assertEquals(maximumAllowedTXNum,
-        transactions.getDatanodeTransactions(dnDd1.getUuid()).size());
+        transactions.getDatanodeTransactions(dnId1.getUuid()).size());
 
     int size = transactions.getDatanodeTransactions(dnId2.getUuid()).size();
     // Adding a duplicated container in dnId2 should fail.
@@ -339,6 +338,7 @@ public class TestDeletedBlockLog {
         transactions.getDatanodeTransactions(dnId2.getUuid()).size());
 
     // Add a new TX in dnId2, so that dnId2 reaches the maximum value.
+    containerID = RandomUtils.nextLong();
     builder = DeletedBlocksTransaction.newBuilder();
     builder.setTxID(12);
     builder.setContainerID(containerID);




[44/50] [abbrv] hadoop git commit: YARN-8265. Improve DNS handling on docker IP changes. Contributed by Billie Rinaldi

Posted by xy...@apache.org.
YARN-8265.  Improve DNS handling on docker IP changes.
            Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce2c1b0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce2c1b0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce2c1b0c

Branch: refs/heads/HDDS-4
Commit: ce2c1b0ceb692cb5bdf82e87c2f08e323972ca38
Parents: d1ba968
Author: Eric Yang <ey...@HW13750.local>
Authored: Fri May 11 22:37:43 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../component/instance/ComponentInstance.java   | 45 ++++++++---
 .../hadoop/yarn/service/MockServiceAM.java      | 17 ++++-
 .../hadoop/yarn/service/TestServiceAM.java      | 42 ++++++++++
 .../linux/runtime/docker/TestDockerClient.java  | 80 ++++++++++++++++++++
 4 files changed, 173 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce2c1b0c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
index 4aca0ea..a323649 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.service.ServiceScheduler;
+import org.apache.hadoop.yarn.service.api.records.Artifact;
 import org.apache.hadoop.yarn.service.api.records.ContainerState;
 import org.apache.hadoop.yarn.service.component.Component;
 import org.apache.hadoop.yarn.service.component.ComponentEvent;
@@ -151,10 +152,19 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
     @Override public void transition(ComponentInstance compInstance,
         ComponentInstanceEvent event) {
       // Query container status for ip and host
+      boolean cancelOnSuccess = true;
+      if (compInstance.getCompSpec().getArtifact() != null && compInstance
+          .getCompSpec().getArtifact().getType() == Artifact.TypeEnum.DOCKER) {
+        // A docker container might get a different IP if the container is
+        // relaunched by the NM, so we need to keep checking the status.
+        // This is a temporary fix until the NM provides a callback for
+        // container relaunch (see YARN-8265).
+        cancelOnSuccess = false;
+      }
       compInstance.containerStatusFuture =
           compInstance.scheduler.executorService.scheduleAtFixedRate(
               new ContainerStatusRetriever(compInstance.scheduler,
-                  event.getContainerId(), compInstance), 0, 1,
+                  event.getContainerId(), compInstance, cancelOnSuccess), 0, 1,
               TimeUnit.SECONDS);
       long containerStartTime = System.currentTimeMillis();
       try {
@@ -373,14 +383,26 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
     this.status = status;
     org.apache.hadoop.yarn.service.api.records.Container container =
         getCompSpec().getContainer(status.getContainerId().toString());
+    boolean doRegistryUpdate = true;
     if (container != null) {
-      container.setIp(StringUtils.join(",", status.getIPs()));
+      String existingIP = container.getIp();
+      String newIP = StringUtils.join(",", status.getIPs());
+      container.setIp(newIP);
       container.setHostname(status.getHost());
-      if (timelineServiceEnabled) {
+      if (existingIP != null && newIP.equals(existingIP)) {
+        doRegistryUpdate = false;
+      }
+      if (timelineServiceEnabled && doRegistryUpdate) {
         serviceTimelinePublisher.componentInstanceIPHostUpdated(container);
       }
     }
-    updateServiceRecord(yarnRegistryOperations, status);
+    if (doRegistryUpdate) {
+      cleanupRegistry(status.getContainerId());
+      LOG.info(
+          getCompInstanceId() + " new IP = " + status.getIPs() + ", host = "
+              + status.getHost() + ", updating registry");
+      updateServiceRecord(yarnRegistryOperations, status);
+    }
   }
 
   public String getCompName() {
@@ -522,12 +544,15 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
     private NodeId nodeId;
     private NMClient nmClient;
     private ComponentInstance instance;
+    private boolean cancelOnSuccess;
     ContainerStatusRetriever(ServiceScheduler scheduler,
-        ContainerId containerId, ComponentInstance instance) {
+        ContainerId containerId, ComponentInstance instance, boolean
+        cancelOnSuccess) {
       this.containerId = containerId;
       this.nodeId = instance.getNodeId();
       this.nmClient = scheduler.getNmClient().getClient();
       this.instance = instance;
+      this.cancelOnSuccess = cancelOnSuccess;
     }
     @Override public void run() {
       ContainerStatus status = null;
@@ -548,10 +573,12 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
         return;
       }
       instance.updateContainerStatus(status);
-      LOG.info(
-          instance.compInstanceId + " IP = " + status.getIPs() + ", host = "
-              + status.getHost() + ", cancel container status retriever");
-      instance.containerStatusFuture.cancel(false);
+      if (cancelOnSuccess) {
+        LOG.info(
+            instance.compInstanceId + " IP = " + status.getIPs() + ", host = "
+                + status.getHost() + ", cancel container status retriever");
+        instance.containerStatusFuture.cancel(false);
+      }
     }
   }
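
The heart of the change above is a fixed-rate status poller that cancels
itself after the first successful answer unless the component is a docker
container, which may come back with a new IP after a relaunch. A minimal
sketch of that conditional self-cancellation using a plain
ScheduledExecutorService; apart from the cancel-on-success flag, the names
and the stub status call are illustrative:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicReference;

    public final class StatusPollerDemo {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        final boolean cancelOnSuccess = false;  // the docker case above
        final AtomicReference<ScheduledFuture<?>> self =
            new AtomicReference<>();

        self.set(pool.scheduleAtFixedRate(() -> {
          System.out.println("status: " + fetchStatus());
          ScheduledFuture<?> me = self.get();
          if (cancelOnSuccess && me != null) {
            me.cancel(false);                   // stop after the first success
          }
        }, 1, 1, TimeUnit.SECONDS));

        Thread.sleep(3000);
        pool.shutdownNow();
      }

      // Stand-in for the NMClient container status query in the patch.
      private static String fetchStatus() {
        return "RUNNING";
      }
    }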
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce2c1b0c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
index 04b0347..4a75aef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
@@ -317,6 +317,14 @@ public class MockServiceAM extends ServiceMaster {
     }
   }
 
+  public Container updateContainerStatus(Service service, int id,
+      String compName, String host) {
+    ContainerId containerId = createContainerId(id);
+    Container container = createContainer(containerId, compName);
+    addContainerStatus(container, ContainerState.RUNNING, host);
+    return container;
+  }
+
   public ContainerId createContainerId(int id) {
     ApplicationId applicationId = ApplicationId.fromString(service.getId());
     return ContainerId.newContainerId(
@@ -389,10 +397,15 @@ public class MockServiceAM extends ServiceMaster {
   }
 
   private void addContainerStatus(Container container, ContainerState state) {
+    addContainerStatus(container, state, container.getNodeId().getHost());
+  }
+
+  private void addContainerStatus(Container container, ContainerState state,
+      String host) {
     ContainerStatus status = ContainerStatus.newInstance(container.getId(),
         state, "", 0);
-    status.setHost(container.getNodeId().getHost());
-    status.setIPs(Lists.newArrayList(container.getNodeId().getHost()));
+    status.setHost(host);
+    status.setIPs(Lists.newArrayList(host));
     containerStatuses.put(container.getId(), status);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce2c1b0c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
index 260976a..e9478f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.security.DockerCredentialTokenIdentifier;
+import org.apache.hadoop.yarn.service.api.records.Artifact;
 import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.service.api.records.Service;
@@ -349,4 +350,45 @@ public class TestServiceAM extends ServiceTestUtils{
 
     am.stop();
   }
+
+  @Test
+  public void testIPChange() throws TimeoutException,
+      InterruptedException {
+    ApplicationId applicationId = ApplicationId.newInstance(123456, 1);
+    String comp1Name = "comp1";
+    String comp1InstName = "comp1-0";
+    Service exampleApp = new Service();
+    exampleApp.setId(applicationId.toString());
+    exampleApp.setVersion("v1");
+    exampleApp.setName("testIPChange");
+    Component comp1 = createComponent(comp1Name, 1, "sleep 60");
+    comp1.setArtifact(new Artifact().type(Artifact.TypeEnum.DOCKER));
+    exampleApp.addComponent(comp1);
+
+    MockServiceAM am = new MockServiceAM(exampleApp);
+    am.init(conf);
+    am.start();
+
+    ComponentInstance comp1inst0 = am.getCompInstance(comp1Name, comp1InstName);
+    // allocate a container
+    am.feedContainerToComp(exampleApp, 1, comp1Name);
+    GenericTestUtils.waitFor(() -> comp1inst0.getContainerStatus() != null,
+        2000, 200000);
+    // first host status will match the container nodeId
+    Assert.assertEquals("localhost",
+        comp1inst0.getContainerStatus().getHost());
+
+    LOG.info("Change the IP and host");
+    // change the container status
+    am.updateContainerStatus(exampleApp, 1, comp1Name, "new.host");
+    GenericTestUtils.waitFor(() -> comp1inst0.getContainerStatus().getHost()
+        .equals("new.host"), 2000, 200000);
+
+    LOG.info("Change the IP and host again");
+    // change the container status
+    am.updateContainerStatus(exampleApp, 1, comp1Name, "newer.host");
+    GenericTestUtils.waitFor(() -> comp1inst0.getContainerStatus().getHost()
+        .equals("newer.host"), 2000, 200000);
+    am.stop();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce2c1b0c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerClient.java
new file mode 100644
index 0000000..efd7db5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerClient.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+/** Unit tests for DockerClient. */
+public class TestDockerClient {
+  private static final File TEST_ROOT_DIR = GenericTestUtils.getTestDir(
+      TestDockerClient.class.getName());
+
+  @Before
+  public void setup() {
+    TEST_ROOT_DIR.mkdirs();
+  }
+
+  @After
+  public void cleanup() {
+    FileUtil.fullyDelete(TEST_ROOT_DIR);
+  }
+
+  @Test
+  public void testWriteCommandToTempFile() throws Exception {
+    String absRoot = TEST_ROOT_DIR.getAbsolutePath();
+    ApplicationId appId = ApplicationId.newInstance(1, 1);
+    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
+    ContainerId cid = ContainerId.newContainerId(attemptId, 1);
+    DockerCommand dockerCmd = new DockerInspectCommand(cid.toString());
+    Configuration conf = new Configuration();
+    conf.set("hadoop.tmp.dir", absRoot);
+    conf.set(YarnConfiguration.NM_LOCAL_DIRS, absRoot);
+    conf.set(YarnConfiguration.NM_LOG_DIRS, absRoot);
+    LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
+    Context mockContext = mock(Context.class);
+    doReturn(conf).when(mockContext).getConf();
+    doReturn(dirsHandler).when(mockContext).getLocalDirsHandler();
+
+    DockerClient dockerClient = new DockerClient(conf);
+    dirsHandler.init(conf);
+    dirsHandler.start();
+    String tmpPath = dockerClient.writeCommandToTempFile(dockerCmd, cid,
+        mockContext);
+    dirsHandler.stop();
+    File tmpFile = new File(tmpPath);
+    assertTrue(tmpFile + " was not created", tmpFile.exists());
+  }
+}




[16/50] [abbrv] hadoop git commit: HDDS-30. Fix TestContainerSQLCli. Contributed by Shashikant Banerjee.

Posted by xy...@apache.org.
HDDS-30. Fix TestContainerSQLCli. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ecfc2a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ecfc2a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ecfc2a7

Branch: refs/heads/HDDS-4
Commit: 7ecfc2a7a7225dca98b6c3cd566cccf14dedd806
Parents: d3183b3
Author: Anu Engineer <ae...@apache.org>
Authored: Thu May 10 11:28:35 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:08 2018 -0700

----------------------------------------------------------------------
 .../hadoop/ozone/scm/TestContainerSQLCli.java   | 23 ------
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java | 73 ++++----------------
 2 files changed, 13 insertions(+), 83 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ecfc2a7/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
index dabe903..4d70af8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
@@ -177,29 +177,6 @@ public class TestContainerSQLCli {
   }
 
   @Test
-  public void testConvertBlockDB() throws Exception {
-    String dbOutPath = GenericTestUtils.getTempPath(
-        UUID.randomUUID() + "/out_sql.db");
-    String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
-    String dbPath = dbRootPath + "/" + BLOCK_DB;
-    String[] args = {"-p", dbPath, "-o", dbOutPath};
-
-    cli.run(args);
-
-    Connection conn = connectDB(dbOutPath);
-    String sql = "SELECT * FROM blockContainer";
-    ResultSet rs = executeQuery(conn, sql);
-    while(rs.next()) {
-      String blockKey = rs.getString("blockKey");
-      String containerName = rs.getString("containerName");
-      assertTrue(blockContainerMap.containsKey(blockKey) &&
-          blockContainerMap.remove(blockKey).equals(containerName));
-    }
-    assertEquals(0, blockContainerMap.size());
-    Files.delete(Paths.get(dbOutPath));
-  }
-
-  @Test
   public void testConvertNodepoolDB() throws Exception {
     String dbOutPath = GenericTestUtils.getTempPath(
         UUID.randomUUID() + "/out_sql.db");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ecfc2a7/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index edfbf02..028b1fc 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.ozone.scm.cli;
 
 import com.google.common.base.Preconditions;
+import com.google.common.primitives.Longs;
+import com.google.protobuf.ByteString;
 import org.apache.commons.cli.BasicParser;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
@@ -86,7 +88,7 @@ public class SQLCLI  extends Configured implements Tool {
   // for container.db
   private static final String CREATE_CONTAINER_INFO =
       "CREATE TABLE containerInfo (" +
-          "containerName TEXT PRIMARY KEY NOT NULL, " +
+          "containerID LONG PRIMARY KEY NOT NULL, " +
           "leaderUUID TEXT NOT NULL)";
   private static final String CREATE_CONTAINER_MEMBERS =
       "CREATE TABLE containerMembers (" +
@@ -100,8 +102,8 @@ public class SQLCLI  extends Configured implements Tool {
           "ipAddress TEXT, " +
           "containerPort INTEGER NOT NULL);";
   private static final String INSERT_CONTAINER_INFO =
-      "INSERT INTO containerInfo (containerName, leaderUUID) " +
-          "VALUES (\"%s\", \"%s\")";
+      "INSERT INTO containerInfo (containerID, leaderUUID) " +
+          "VALUES (\"%d\", \"%s\")";
   private static final String INSERT_DATANODE_INFO =
       "INSERT INTO datanodeInfo (hostname, datanodeUUid, ipAddress, " +
           "containerPort) " +
@@ -109,14 +111,6 @@ public class SQLCLI  extends Configured implements Tool {
   private static final String INSERT_CONTAINER_MEMBERS =
       "INSERT INTO containerMembers (containerName, datanodeUUID) " +
           "VALUES (\"%s\", \"%s\")";
-  // for block.db
-  private static final String CREATE_BLOCK_CONTAINER =
-      "CREATE TABLE blockContainer (" +
-          "blockKey TEXT PRIMARY KEY NOT NULL, " +
-          "containerName TEXT NOT NULL)";
-  private static final String INSERT_BLOCK_CONTAINER =
-      "INSERT INTO blockContainer (blockKey, containerName) " +
-          "VALUES (\"%s\", \"%s\")";
   // for nodepool.db
   private static final String CREATE_NODE_POOL =
       "CREATE TABLE nodePool (" +
@@ -291,10 +285,7 @@ public class SQLCLI  extends Configured implements Tool {
     if (dbName.toString().endsWith(CONTAINER_DB_SUFFIX)) {
       LOG.info("Converting container DB");
       convertContainerDB(dbPath, outPath);
-    } else if (dbName.toString().equals(BLOCK_DB)) {
-      LOG.info("Converting block DB");
-      convertBlockDB(dbPath, outPath);
-    } else if (dbName.toString().equals(NODEPOOL_DB)) {
+    } else if (dbName.toString().equals(NODEPOOL_DB)) {
       LOG.info("Converting node pool DB");
       convertNodePoolDB(dbPath, outPath);
     } else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) {
@@ -498,14 +489,14 @@ public class SQLCLI  extends Configured implements Tool {
 
       HashSet<String> uuidChecked = new HashSet<>();
       dbStore.iterate(null, (key, value) -> {
-        String containerName = new String(key, encoding);
+        long containerID = Longs.fromByteArray(key);
         ContainerInfo containerInfo = null;
         containerInfo = ContainerInfo.fromProtobuf(
             HddsProtos.SCMContainerInfo.PARSER.parseFrom(value));
         Preconditions.checkNotNull(containerInfo);
         try {
           //TODO: include container state in the sqlite schema
-          insertContainerDB(conn, containerName,
+          insertContainerDB(conn, containerID,
               containerInfo.getPipeline().getProtobufMessage(), uuidChecked);
           return true;
         } catch (SQLException e) {
@@ -518,16 +509,16 @@ public class SQLCLI  extends Configured implements Tool {
   /**
    * Insert into the sqlite DB of container.db.
    * @param conn the connection to the sqlite DB.
-   * @param containerName the name of the container.
+   * @param containerID the id of the container.
    * @param pipeline the actual container pipeline object.
    * @param uuidChecked the uuid that has been already inserted.
    * @throws SQLException throws exception.
    */
-  private void insertContainerDB(Connection conn, String containerName,
+  private void insertContainerDB(Connection conn, long containerID,
       Pipeline pipeline, Set<String> uuidChecked) throws SQLException {
-    LOG.info("Insert to sql container db, for container {}", containerName);
+    LOG.info("Insert to sql container db, for container {}", containerID);
     String insertContainerInfo = String.format(
-        INSERT_CONTAINER_INFO, containerName,
+        INSERT_CONTAINER_INFO, containerID,
         pipeline.getPipelineChannel().getLeaderID());
     executeSQL(conn, insertContainerInfo);
 
@@ -546,49 +537,11 @@ public class SQLCLI  extends Configured implements Tool {
         uuidChecked.add(uuid);
       }
       String insertContainerMembers = String.format(
-          INSERT_CONTAINER_MEMBERS, containerName, uuid);
+          INSERT_CONTAINER_MEMBERS, containerID, uuid);
       executeSQL(conn, insertContainerMembers);
     }
     LOG.info("Insertion completed.");
   }
-
-  /**
-   * Converts block.db to sqlite. This is rather simple db, the schema has only
-   * one table:
-   *
-   * blockContainer
-   * --------------------------
-   * blockKey*  | containerName
-   * --------------------------
-   *
-   * @param dbPath path to container db.
-   * @param outPath path to output sqlite
-   * @throws IOException throws exception.
-   */
-  private void convertBlockDB(Path dbPath, Path outPath) throws Exception {
-    LOG.info("Create tables for sql block db.");
-    File dbFile = dbPath.toFile();
-    try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
-        .setConf(conf).setDbFile(dbFile).build();
-        Connection conn = connectDB(outPath.toString())) {
-      executeSQL(conn, CREATE_BLOCK_CONTAINER);
-
-      dbStore.iterate(null, (key, value) -> {
-        String blockKey = DFSUtilClient.bytes2String(key);
-        String containerName = DFSUtilClient.bytes2String(value);
-        String insertBlockContainer = String.format(
-            INSERT_BLOCK_CONTAINER, blockKey, containerName);
-
-        try {
-          executeSQL(conn, insertBlockContainer);
-          return true;
-        } catch (SQLException e) {
-          throw new IOException(e);
-        }
-      });
-    }
-  }
-
   /**
    * Converts nodePool.db to sqlite. The schema of sql db:
    * two tables, nodePool and datanodeInfo (the same datanode Info as for
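
With the block DB converter gone, the remaining container conversion treats
each key as the 8-byte encoding of a long container ID, decoded with Guava's
Longs, and the INSERT template now carries %d instead of %s. A small sketch
of that round trip, assuming only the Guava primitives helper (the leader
UUID value is illustrative):

    import com.google.common.primitives.Longs;

    public final class ContainerKeyDemo {
      public static void main(String[] args) {
        long containerID = 42L;
        // Container DB keys: 8-byte big-endian encoding of the long ID.
        byte[] key = Longs.toByteArray(containerID);
        // What the iterate() callback now does instead of new String(key):
        long decoded = Longs.fromByteArray(key);
        String insert = String.format(
            "INSERT INTO containerInfo (containerID, leaderUUID) "
                + "VALUES (\"%d\", \"%s\")",
            decoded, "example-leader-uuid");
        System.out.println(insert);
      }
    }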




[27/50] [abbrv] hadoop git commit: HDDS-39. Ozone: Compile Ozone/HDFS/Cblock protobuf files with proto3 compiler using maven protoc plugin. Contributed by Mukul Kumar Singh.

Posted by xy...@apache.org.
HDDS-39. Ozone: Compile Ozone/HDFS/Cblock protobuf files with proto3 compiler using maven protoc plugin.
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0db646d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0db646d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0db646d

Branch: refs/heads/HDDS-4
Commit: d0db646d417a4c0453a5ca6f1889a4dc6cda27b3
Parents: 62e6ba9
Author: Anu Engineer <ae...@apache.org>
Authored: Fri May 11 11:08:45 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdds/scm/XceiverClient.java   | 16 +++---
 .../hadoop/hdds/scm/XceiverClientHandler.java   | 10 ++--
 .../hdds/scm/XceiverClientInitializer.java      | 18 ++++---
 .../hadoop/hdds/scm/XceiverClientMetrics.java   |  2 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java     | 18 +++----
 .../scm/client/ContainerOperationClient.java    |  5 +-
 .../hdds/scm/storage/ChunkInputStream.java      |  6 +--
 .../hdds/scm/storage/ChunkOutputStream.java     |  8 +--
 hadoop-hdds/common/pom.xml                      | 52 +++++++++++++++++++-
 .../org/apache/hadoop/hdds/client/BlockID.java  |  2 +-
 .../hadoop/hdds/scm/XceiverClientSpi.java       |  4 +-
 .../hadoop/hdds/scm/client/ScmClient.java       |  3 +-
 .../helpers/StorageContainerException.java      |  2 +-
 .../scm/storage/ContainerProtocolCalls.java     | 40 ++++++++-------
 .../container/common/helpers/ChunkInfo.java     |  2 +-
 .../ozone/container/common/helpers/KeyData.java |  2 +-
 .../com/google/protobuf/ShadedProtoUtil.java    | 38 --------------
 .../com/google/protobuf/package-info.java       | 22 ---------
 .../main/proto/DatanodeContainerProtocol.proto  |  2 +-
 .../container/common/helpers/ChunkUtils.java    | 36 +++++++-------
 .../container/common/helpers/ContainerData.java |  4 +-
 .../common/helpers/ContainerMetrics.java        |  2 +-
 .../common/helpers/ContainerUtils.java          |  6 +--
 .../container/common/helpers/FileUtils.java     |  4 +-
 .../container/common/helpers/KeyUtils.java      | 10 ++--
 .../container/common/impl/ChunkManagerImpl.java | 10 ++--
 .../common/impl/ContainerManagerImpl.java       | 42 ++++++++--------
 .../ozone/container/common/impl/Dispatcher.java | 26 +++++-----
 .../container/common/impl/KeyManagerImpl.java   |  6 +--
 .../common/interfaces/ChunkManager.java         |  2 +-
 .../common/interfaces/ContainerDispatcher.java  |  4 +-
 .../background/BlockDeletingService.java        |  5 +-
 .../common/transport/server/XceiverServer.java  | 15 +++---
 .../transport/server/XceiverServerHandler.java  |  8 +--
 .../server/XceiverServerInitializer.java        | 18 ++++---
 .../server/ratis/ContainerStateMachine.java     | 25 ++++------
 .../scm/cli/container/InfoContainerHandler.java |  5 +-
 .../ozone/client/io/ChunkGroupInputStream.java  |  2 +-
 .../ozone/client/io/ChunkGroupOutputStream.java |  2 +-
 .../client/io/OzoneContainerTranslation.java    |  2 +-
 .../ozone/container/ContainerTestHelper.java    | 10 ++--
 .../common/TestBlockDeletingService.java        |  2 +-
 .../common/impl/TestContainerPersistence.java   |  6 +--
 .../container/metrics/TestContainerMetrics.java |  8 +--
 .../container/ozoneimpl/TestOzoneContainer.java |  3 +-
 .../container/server/TestContainerServer.java   | 10 ++--
 .../ozone/scm/TestContainerSmallFile.java       |  2 +-
 .../ozone/scm/TestXceiverClientMetrics.java     | 12 ++---
 .../hadoop/ozone/web/client/TestKeys.java       |  2 +-
 .../genesis/BenchMarkDatanodeDispatcher.java    | 27 ++++++----
 hadoop-project/pom.xml                          | 11 +++--
 51 files changed, 292 insertions(+), 287 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
index 5c702c6..6d33cd4 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
@@ -20,18 +20,18 @@ package org.apache.hadoop.hdds.scm;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import io.netty.bootstrap.Bootstrap;
-import io.netty.channel.Channel;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.nio.NioEventLoopGroup;
-import io.netty.channel.socket.nio.NioSocketChannel;
-import io.netty.handler.logging.LogLevel;
-import io.netty.handler.logging.LoggingHandler;
+import org.apache.ratis.shaded.io.netty.bootstrap.Bootstrap;
+import org.apache.ratis.shaded.io.netty.channel.Channel;
+import org.apache.ratis.shaded.io.netty.channel.EventLoopGroup;
+import org.apache.ratis.shaded.io.netty.channel.nio.NioEventLoopGroup;
+import org.apache.ratis.shaded.io.netty.channel.socket.nio.NioSocketChannel;
+import org.apache.ratis.shaded.io.netty.handler.logging.LogLevel;
+import org.apache.ratis.shaded.io.netty.handler.logging.LoggingHandler;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.slf4j.Logger;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
index e2b55ac..6a2286c 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
@@ -18,13 +18,13 @@
 package org.apache.hadoop.hdds.scm;
 
 import com.google.common.base.Preconditions;
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.SimpleChannelInboundHandler;
+import org.apache.ratis.shaded.io.netty.channel.Channel;
+import org.apache.ratis.shaded.io.netty.channel.ChannelHandlerContext;
+import org.apache.ratis.shaded.io.netty.channel.SimpleChannelInboundHandler;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java
index e10a9f6..90e2f5a 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java
@@ -17,15 +17,17 @@
  */
 package org.apache.hadoop.hdds.scm;
 
-import io.netty.channel.ChannelInitializer;
-import io.netty.channel.ChannelPipeline;
-import io.netty.channel.socket.SocketChannel;
-import io.netty.handler.codec.protobuf.ProtobufDecoder;
-import io.netty.handler.codec.protobuf.ProtobufEncoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender;
+import org.apache.ratis.shaded.io.netty.channel.ChannelInitializer;
+import org.apache.ratis.shaded.io.netty.channel.ChannelPipeline;
+import org.apache.ratis.shaded.io.netty.channel.socket.SocketChannel;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf.ProtobufEncoder;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf
+    .ProtobufVarint32FrameDecoder;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf
+    .ProtobufVarint32LengthFieldPrepender;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
 import java.util.concurrent.Semaphore;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
index a61eba1..fbc348c 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdds.scm;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index d010c69..0effa8f 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -19,14 +19,15 @@
 package org.apache.hadoop.hdds.scm;
 
 import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.ratis.shaded.com.google.protobuf
+    .InvalidProtocolBufferException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.ratis.RatisHelper;
@@ -37,7 +38,6 @@ import org.apache.ratis.protocol.RaftPeer;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
 import org.apache.ratis.shaded.com.google.protobuf.ByteString;
-import org.apache.ratis.shaded.com.google.protobuf.ShadedProtoUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -211,8 +211,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
   private RaftClientReply sendRequest(ContainerCommandRequestProto request)
       throws IOException {
     boolean isReadOnlyRequest = isReadOnly(request);
-    ByteString byteString =
-        ShadedProtoUtil.asShadedByteString(request.toByteArray());
+    ByteString byteString = request.toByteString();
     LOG.debug("sendCommand {} {}", isReadOnlyRequest, request);
     final RaftClientReply reply =  isReadOnlyRequest ?
         getClient().sendReadOnly(() -> byteString) :
@@ -224,8 +223,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
   private CompletableFuture<RaftClientReply> sendRequestAsync(
       ContainerCommandRequestProto request) throws IOException {
     boolean isReadOnlyRequest = isReadOnly(request);
-    ByteString byteString =
-        ShadedProtoUtil.asShadedByteString(request.toByteArray());
+    ByteString byteString = request.toByteString();
     LOG.debug("sendCommandAsync {} {}", isReadOnlyRequest, request);
     return isReadOnlyRequest ? getClient().sendReadOnlyAsync(() -> byteString) :
         getClient().sendAsync(() -> byteString);
@@ -237,7 +235,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
     final RaftClientReply reply = sendRequest(request);
     Preconditions.checkState(reply.isSuccess());
     return ContainerCommandResponseProto.parseFrom(
-        ShadedProtoUtil.asByteString(reply.getMessage().getContent()));
+        reply.getMessage().getContent());
   }
 
   /**
@@ -257,7 +255,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
         .thenApply(reply -> {
           try {
             return ContainerCommandResponseProto.parseFrom(
-                ShadedProtoUtil.asByteString(reply.getMessage().getContent()));
+                reply.getMessage().getContent());
           } catch (InvalidProtocolBufferException e) {
             throw new CompletionException(e);
           }
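
The hunks above drop the ShadedProtoUtil round trip entirely: because ContainerCommandRequestProto is now generated against the Ratis-shaded protobuf runtime, its own toByteString() already yields a shaded ByteString, and parseFrom() accepts the shaded reply content directly. A minimal sketch of the copy that disappears, assuming protobuf-java 3.x on the classpath and using the StringValue well-known type purely as an illustrative stand-in (none of these names are from the patch):

    import com.google.protobuf.ByteString;
    import com.google.protobuf.StringValue;

    public class ByteStringPathsDemo {
      public static void main(String[] args) {
        StringValue msg = StringValue.newBuilder().setValue("chunk-data").build();
        // Old shape: serialize to a byte[] and copy/wrap it into the target
        // runtime's ByteString, an extra pass over every payload.
        ByteString copied = ByteString.copyFrom(msg.toByteArray());
        // New shape: when the message and its caller share one protobuf
        // runtime, toByteString() returns the bytes without the detour.
        ByteString direct = msg.toByteString();
        System.out.println(copied.equals(direct)); // true: same bytes either way
      }
    }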

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index 15d197c..07f6cec 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -25,8 +25,9 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ReadContainerResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
index c4c3362..020c684 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
@@ -18,12 +18,12 @@
 
 package org.apache.hadoop.hdds.scm.storage;
 
-import com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.hadoop.fs.Seekable;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ReadChunkResponseProto;
 import org.apache.hadoop.hdds.client.BlockID;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index 8fce00d..779e636 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -18,13 +18,13 @@
 
 package org.apache.hadoop.hdds.scm.storage;
 
-import com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyValue;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.hdds.client.BlockID;
 
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index bf53042..6310df1 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -78,8 +78,59 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   </dependencies>
 
   <build>
+    <extensions>
+      <extension>
+        <groupId>kr.motd.maven</groupId>
+        <artifactId>os-maven-plugin</artifactId>
+        <version>${os-maven-plugin.version}</version>
+      </extension>
+    </extensions>
     <plugins>
       <plugin>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
+        <version>${protobuf-maven-plugin.version}</version>
+        <extensions>true</extensions>
+        <configuration>
+          <protocArtifact>
+            com.google.protobuf:protoc:${protobuf-compile.version}:exe:${os.detected.classifier}
+          </protocArtifact>
+          <protoSourceRoot>${basedir}/src/main/proto/</protoSourceRoot>
+          <includes>
+            <include>DatanodeContainerProtocol.proto</include>
+          </includes>
+          <outputDirectory>target/generated-sources/java</outputDirectory>
+          <clearOutputDirectory>false</clearOutputDirectory>
+        </configuration>
+        <executions>
+          <execution>
+            <id>compile-protoc</id>
+              <goals>
+                <goal>compile</goal>
+                <goal>test-compile</goal>
+              </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>generate-sources</phase>
+            <configuration>
+              <tasks>
+                <replace token="com.google.protobuf" value="org.apache.ratis.shaded.com.google.protobuf"
+                  dir="target/generated-sources/java/org/apache/hadoop/hdds/protocol/datanode/proto">
+                </replace>
+              </tasks>
+            </configuration>
+            <goals>
+              <goal>run</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-maven-plugins</artifactId>
         <executions>
@@ -107,7 +158,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                 <directory>${basedir}/src/main/proto</directory>
                 <includes>
                   <include>StorageContainerLocationProtocol.proto</include>
-                  <include>DatanodeContainerProtocol.proto</include>
                   <include>hdds.proto</include>
                   <include>ScmBlockLocationProtocol.proto</include>
                 </includes>
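
The build change wires a second protoc pass just for DatanodeContainerProtocol.proto: protobuf-maven-plugin generates it into target/generated-sources/java, and the maven-antrun-plugin <replace> task then rewrites every com.google.protobuf token under the new datanode/proto output so the generated code compiles against the Ratis-shaded runtime; the same .proto is removed from the hadoop-maven-plugins include list so it is no longer also generated against the stock runtime. A rough Java equivalent of what that <replace> task does (paths taken from the POM above, everything else illustrative, not the build's actual mechanism):

    import java.io.IOException;
    import java.io.UncheckedIOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.stream.Stream;

    public class RelocateGeneratedSources {
      public static void main(String[] args) throws IOException {
        Path root = Paths.get("target/generated-sources/java/"
            + "org/apache/hadoop/hdds/protocol/datanode/proto");
        try (Stream<Path> files = Files.walk(root)) {
          files.filter(p -> p.toString().endsWith(".java")).forEach(p -> {
            try {
              String src = new String(Files.readAllBytes(p), StandardCharsets.UTF_8);
              // Plain token swap, safe on freshly generated sources because
              // they only ever reference the stock protobuf package.
              Files.write(p, src.replace("com.google.protobuf",
                  "org.apache.ratis.shaded.com.google.protobuf")
                  .getBytes(StandardCharsets.UTF_8));
            } catch (IOException e) {
              throw new UncheckedIOException(e);
            }
          });
        }
      }
    }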

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
index 355a36d..7bf8f01 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.hdds.client;
 
 import org.apache.commons.lang.builder.ToStringBuilder;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
index c96f79b..56cc741 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
@@ -21,9 +21,9 @@ package org.apache.hadoop.hdds.scm;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index dcf9fed..b52819a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -20,7 +20,8 @@ package org.apache.hadoop.hdds.scm.client;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerData;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
index 35d8444..f1405ff 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm.container.common.helpers;
 
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
 import java.io.IOException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index ca388d9..5fbf373 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -18,39 +18,41 @@
 
 package org.apache.hadoop.hdds.scm.storage;
 
-import com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.GetKeyRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .GetKeyRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .GetKeyResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .GetSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .GetSmallFileResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.PutKeyRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .PutKeyRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .PutSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ReadChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ReadChunkResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ReadContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ReadContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Type;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyValue;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.hdds.client.BlockID;
 
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
index 7cf95a9..21916b5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.container.common.helpers;
 
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
 import java.io.IOException;
 import java.util.Map;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
index c485c7f..129e4a8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.ozone.container.common.helpers;
 
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.client.BlockID;
 
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java
deleted file mode 100644
index 29242ad..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.ratis.shaded.com.google.protobuf;
-
-/** Utilities for the shaded protobuf in Ratis. */
-public interface ShadedProtoUtil {
-  /**
-   * @param bytes
-   * @return the wrapped shaded {@link ByteString} (no copying).
-   */
-  static ByteString asShadedByteString(byte[] bytes) {
-    return ByteString.wrap(bytes);
-  }
-
-  /**
-   * @param shaded
-   * @return a {@link com.google.protobuf.ByteString} (requires copying).
-   */
-  static com.google.protobuf.ByteString asByteString(ByteString shaded) {
-    return com.google.protobuf.ByteString.copyFrom(
-        shaded.asReadOnlyByteBuffer());
-  }
-}
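
With ContainerProtos now generated directly against the shaded runtime, both bridge methods above are dead code: asShadedByteString() could wrap without copying only because this interface sat inside the shaded package itself, while asByteString() paid for a full copy of every reply it converted back. Every former call site now stays within a single protobuf runtime, which is what makes the whole interface (and its package-info below) safe to delete.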

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java
deleted file mode 100644
index 032dd96..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ratis.shaded.com.google.protobuf;
-
-/**
- * This package contains classes related to the shaded protobuf in Apache Ratis.
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 80bc22d..3479866 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -24,7 +24,7 @@
 
 // This file contains protocol buffers that are used to transfer data
 // to and from the datanode.
-option java_package = "org.apache.hadoop.hdds.protocol.proto";
+option java_package = "org.apache.hadoop.hdds.protocol.datanode.proto";
 option java_outer_classname = "ContainerProtos";
 option java_generate_equals_and_hash = true;
 package hadoop.hdds.datanode;
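
This one-line option change is what drives the import churn throughout the patch: protoc derives the generated outer class's package from java_package, so ContainerProtos moves to org.apache.hadoop.hdds.protocol.datanode.proto while the protos still compiled by hadoop-maven-plugins (HddsProtos and the SCM protocols) stay under org.apache.hadoop.hdds.protocol.proto. A hypothetical caller after the move (class name made up for illustration):

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;

    public class RelocatedProtoDemo {
      public static void main(String[] args) {
        // Same nested types as before, just under the datanode-specific
        // package, e.g. the command-type enum used by the dispatcher.
        System.out.println(ContainerProtos.Type.CreateContainer);
      }
    }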

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
index 8c5609d..eba8594 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
@@ -18,12 +18,12 @@
 package org.apache.hadoop.ozone.container.common.helpers;
 
 import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.commons.codec.binary.Hex;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl;
@@ -40,22 +40,22 @@ import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.util.concurrent.ExecutionException;
 
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .CHECKSUM_MISMATCH;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .CONTAINER_INTERNAL_ERROR;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .CONTAINER_NOT_FOUND;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .INVALID_WRITE_SIZE;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .IO_EXCEPTION;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .OVERWRITE_FLAG_REQUIRED;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .UNABLE_TO_FIND_CHUNK;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .UNABLE_TO_FIND_DATA_DIR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.CHECKSUM_MISMATCH;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.CONTAINER_INTERNAL_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.CONTAINER_NOT_FOUND;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.INVALID_WRITE_SIZE;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.IO_EXCEPTION;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.OVERWRITE_FLAG_REQUIRED;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.UNABLE_TO_FIND_CHUNK;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.UNABLE_TO_FIND_DATA_DIR;
 
 /**
  * Set of utility functions used by the chunk Manager.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index 947dc7d..63111c8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.ozone.container.common.helpers;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerLifeCycleState;
 import org.apache.hadoop.ozone.OzoneConsts;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
index d4d732b..4300b2d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.container.common.helpers;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index 959d88c..9b52316 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
@@ -42,9 +42,9 @@ import java.nio.file.Path;
 import java.nio.file.Paths;
 
 import static org.apache.commons.io.FilenameUtils.removeExtension;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result
     .INVALID_ARGUMENT;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result
     .UNABLE_TO_FIND_DATA_DIR;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java
index ec27452..a2875be 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.ozone.container.common.helpers;
 
 import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
 /**
  * File Utils are helper routines used by putSmallFile and getSmallFile

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
index dbd5772..f831d45 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
@@ -21,17 +21,17 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
 import org.apache.hadoop.utils.MetadataStore;
 
 import java.io.IOException;
 import java.nio.charset.Charset;
 
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .NO_SUCH_KEY;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .UNABLE_TO_READ_METADATA_DB;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.NO_SUCH_KEY;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.UNABLE_TO_READ_METADATA_DB;
 
 /**
  * Utils functions to help key functions.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
index 3505196..fa82026 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
@@ -21,7 +21,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
@@ -40,10 +40,10 @@ import java.nio.file.StandardCopyOption;
 import java.security.NoSuchAlgorithmException;
 import java.util.concurrent.ExecutionException;
 
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .CONTAINER_INTERNAL_ERROR;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .UNSUPPORTED_REQUEST;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.CONTAINER_INTERNAL_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.UNSUPPORTED_REQUEST;
 
 /**
  * An implementation of ChunkManager that is used by default in ozone.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index cb60334..240beba 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
@@ -79,26 +79,26 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.stream.Collectors;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .CONTAINER_EXISTS;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .CONTAINER_INTERNAL_ERROR;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .CONTAINER_NOT_FOUND;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .ERROR_IN_COMPACT_DB;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .INVALID_CONFIG;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .IO_EXCEPTION;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .NO_SUCH_ALGORITHM;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .UNABLE_TO_READ_METADATA_DB;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .UNCLOSED_CONTAINER_IO;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .UNSUPPORTED_REQUEST;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.CONTAINER_EXISTS;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.CONTAINER_INTERNAL_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.CONTAINER_NOT_FOUND;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.ERROR_IN_COMPACT_DB;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.INVALID_CONFIG;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.IO_EXCEPTION;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.NO_SUCH_ALGORITHM;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.UNABLE_TO_READ_METADATA_DB;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.UNCLOSED_CONTAINER_IO;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.UNSUPPORTED_REQUEST;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
index 8d1b17c..3b478cd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
@@ -19,16 +19,16 @@
 package org.apache.hadoop.ozone.container.common.impl;
 
 import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Type;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkUtils;
@@ -48,14 +48,14 @@ import java.security.NoSuchAlgorithmException;
 import java.util.LinkedList;
 import java.util.List;
 
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .CLOSED_CONTAINER_IO;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .GET_SMALL_FILE_ERROR;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .NO_SUCH_ALGORITHM;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .PUT_SMALL_FILE_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.CLOSED_CONTAINER_IO;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.GET_SMALL_FILE_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.NO_SUCH_ALGORITHM;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.PUT_SMALL_FILE_ERROR;
 
 /**
  * Ozone Container dispatcher takes a call from the netty server and routes it

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
index f920ded..0ca7354 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
@@ -23,7 +23,7 @@ import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
@@ -40,8 +40,8 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
-    .NO_SUCH_KEY;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.NO_SUCH_KEY;
 
 /**
  * Key Manager impl.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
index 26dcf21..c58fb9d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.container.common.interfaces;
 
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
index 984fe41..7e12614 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
@@ -18,9 +18,9 @@
 
 package org.apache.hadoop.ozone.container.common.interfaces;
 
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
index 7c3fa30..99845fa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
@@ -19,13 +19,14 @@
 package org.apache.hadoop.ozone.container.common.statemachine.background;
 
 import com.google.common.collect.Lists;
-import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.ratis.shaded.com.google.protobuf
+    .InvalidProtocolBufferException;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
index 50e45b4..7105fd7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
@@ -19,13 +19,14 @@
 package org.apache.hadoop.ozone.container.common.transport.server;
 
 import com.google.common.base.Preconditions;
-import io.netty.bootstrap.ServerBootstrap;
-import io.netty.channel.Channel;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.nio.NioEventLoopGroup;
-import io.netty.channel.socket.nio.NioServerSocketChannel;
-import io.netty.handler.logging.LogLevel;
-import io.netty.handler.logging.LoggingHandler;
+import org.apache.ratis.shaded.io.netty.bootstrap.ServerBootstrap;
+import org.apache.ratis.shaded.io.netty.channel.Channel;
+import org.apache.ratis.shaded.io.netty.channel.EventLoopGroup;
+import org.apache.ratis.shaded.io.netty.channel.nio.NioEventLoopGroup;
+import org.apache.ratis.shaded.io.netty.channel.socket.nio
+    .NioServerSocketChannel;
+import org.apache.ratis.shaded.io.netty.handler.logging.LogLevel;
+import org.apache.ratis.shaded.io.netty.handler.logging.LoggingHandler;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java
index 5947dde..3765299 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java
@@ -18,11 +18,11 @@
 
 package org.apache.hadoop.ozone.container.common.transport.server;
 
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.SimpleChannelInboundHandler;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.ratis.shaded.io.netty.channel.ChannelHandlerContext;
+import org.apache.ratis.shaded.io.netty.channel.SimpleChannelInboundHandler;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.slf4j.Logger;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java
index 78ba26b..e405cf9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java
@@ -19,14 +19,16 @@
 package org.apache.hadoop.ozone.container.common.transport.server;
 
 import com.google.common.base.Preconditions;
-import io.netty.channel.ChannelInitializer;
-import io.netty.channel.ChannelPipeline;
-import io.netty.channel.socket.SocketChannel;
-import io.netty.handler.codec.protobuf.ProtobufDecoder;
-import io.netty.handler.codec.protobuf.ProtobufEncoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.ratis.shaded.io.netty.channel.ChannelInitializer;
+import org.apache.ratis.shaded.io.netty.channel.ChannelPipeline;
+import org.apache.ratis.shaded.io.netty.channel.socket.SocketChannel;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf.ProtobufEncoder;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf
+    .ProtobufVarint32FrameDecoder;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf
+    .ProtobufVarint32LengthFieldPrepender;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 89eaace..56c52bb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -19,13 +19,14 @@
 package org.apache.hadoop.ozone.container.common.transport.server.ratis;
 
 import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.ratis.shaded.com.google.protobuf
+    .InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .WriteChunkRequestProto;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.ratis.conf.RaftProperties;
@@ -34,7 +35,6 @@ import org.apache.ratis.protocol.RaftClientRequest;
 import org.apache.ratis.protocol.RaftPeerId;
 import org.apache.ratis.server.storage.RaftStorage;
 import org.apache.ratis.shaded.com.google.protobuf.ByteString;
-import org.apache.ratis.shaded.com.google.protobuf.ShadedProtoUtil;
 import org.apache.ratis.shaded.proto.RaftProtos.LogEntryProto;
 import org.apache.ratis.shaded.proto.RaftProtos.SMLogEntryProto;
 import org.apache.ratis.statemachine.StateMachineStorage;
@@ -159,8 +159,8 @@ public class ContainerStateMachine extends BaseStateMachine {
               .build();
 
       log = SMLogEntryProto.newBuilder()
-          .setData(getShadedByteString(commitContainerCommandProto))
-          .setStateMachineData(getShadedByteString(dataContainerCommandProto))
+          .setData(commitContainerCommandProto.toByteString())
+          .setStateMachineData(dataContainerCommandProto.toByteString())
           .build();
     } else if (proto.getCmdType() == ContainerProtos.Type.CreateContainer) {
       log = SMLogEntryProto.newBuilder()
@@ -175,21 +175,16 @@ public class ContainerStateMachine extends BaseStateMachine {
     return new TransactionContextImpl(this, request, log);
   }
 
-  private ByteString getShadedByteString(ContainerCommandRequestProto proto) {
-    return ShadedProtoUtil.asShadedByteString(proto.toByteArray());
-  }
-
   private ContainerCommandRequestProto getRequestProto(ByteString request)
       throws InvalidProtocolBufferException {
-    return ContainerCommandRequestProto.parseFrom(
-        ShadedProtoUtil.asByteString(request));
+    return ContainerCommandRequestProto.parseFrom(request);
   }
 
   private Message runCommand(ContainerCommandRequestProto requestProto) {
     LOG.trace("dispatch {}", requestProto);
     ContainerCommandResponseProto response = dispatcher.dispatch(requestProto);
     LOG.trace("response {}", response);
-    return () -> ShadedProtoUtil.asShadedByteString(response.toByteArray());
+    return () -> response.toByteString();
   }
 
   private CompletableFuture<Message> handleWriteChunk(
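
What makes this simplification possible is visible in the import block above: the ContainerProtos classes are now generated under org.apache.hadoop.hdds.protocol.datanode.proto and compiled against the Ratis-shaded protobuf runtime, so toByteString() already yields the shaded ByteString type. A minimal before/after sketch (variable names are illustrative, not from the patch):

    // Before: serialize to a byte[], then wrap/copy it into the shaded type.
    ByteString before = ShadedProtoUtil.asShadedByteString(proto.toByteArray());

    // After: the message class is itself built against the shaded runtime,
    // so one serialization suffices and the byte[] round trip disappears.
    ByteString after = proto.toByteString();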

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
index cefa28c..6027bec 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
@@ -26,8 +26,9 @@ import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerLifeCycleState;
 
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index ccc5911..e1a2918 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.client.io;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.Seekable;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 41ceee4..c6e56b3 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.client.io;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
index e7215ef..e74fffd 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.client.io;
 
 
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyData;
 import org.apache.hadoop.hdds.client.BlockID;
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index fed725c..d2a6434 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -19,19 +19,19 @@
 package org.apache.hadoop.ozone.container;
 
 import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.commons.codec.binary.Hex;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyValue;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index a60da21..56fd0b1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index e51c3f7..a7cab4e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -22,7 +22,7 @@ import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -36,9 +36,7 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.util.Time;
 import org.apache.hadoop.utils.MetadataStore;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -77,7 +75,7 @@ import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper
     .setDataChecksum;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .Stage.COMBINED;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index 0bba5c1..2921be2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -26,9 +26,11 @@ import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerCommandResponseProto;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 4e1d14b..513974a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.container.ozoneimpl;
 
 import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdds.scm.XceiverClient;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index b207914..eb170ea 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -18,11 +18,13 @@
 
 package org.apache.hadoop.ozone.container.server;
 
-import io.netty.channel.embedded.EmbeddedChannel;
+import org.apache.ratis.shaded.io.netty.channel.embedded.EmbeddedChannel;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerCommandResponseProto;
 
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
index f56d78c..ce1fe46 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.scm;
 
 import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0db646d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
index 99742c2..d6f5d32 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
@@ -26,24 +26,22 @@ import java.util.ArrayList;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CountDownLatch;
 
-import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientMetrics;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;




[33/50] [abbrv] hadoop git commit: HDDS-17. Add node to container map class to simplify state in SCM. Contributed by Anu Engineer.

Posted by xy...@apache.org.
HDDS-17. Add node to container map class to simplify state in SCM.
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/992eea51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/992eea51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/992eea51

Branch: refs/heads/HDDS-4
Commit: 992eea516a54764fff5e23da83f30298e880638a
Parents: a66072d
Author: Anu Engineer <ae...@apache.org>
Authored: Sat May 12 09:57:42 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdds/scm/container/ContainerID.java  |   2 +-
 .../hdds/scm/container/states/package-info.java |   2 +-
 .../hdds/scm/exceptions/SCMException.java       |   4 +-
 .../hdds/scm/node/states/Node2ContainerMap.java | 184 +++++++++++
 .../hdds/scm/node/states/ReportResult.java      |  86 ++++++
 .../hdds/scm/node/states/package-info.java      |  22 ++
 .../scm/node/states/Node2ContainerMapTest.java  | 308 +++++++++++++++++++
 .../hdds/scm/node/states/package-info.java      |  23 ++
 .../ozone/genesis/GenesisMemoryProfiler.java    |   4 +-
 9 files changed, 631 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/992eea51/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
index 9520c8c..9845c04 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
@@ -38,7 +38,7 @@ public class ContainerID implements Comparable {
    */
   public ContainerID(long id) {
     Preconditions.checkState(id > 0,
-        "Container ID should be a positive int");
+        "Container ID should be a positive long. "+ id);
     this.id = id;
   }
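
A side note on the new message (an observation, not part of the patch): Guava's two-argument checkState builds the concatenated message eagerly on every call, even when the check passes, while the varargs template overload defers formatting to the failure path:

    // Eager: the "..." + id concatenation runs on every successful call.
    Preconditions.checkState(id > 0,
        "Container ID should be a positive long. " + id);

    // Lazy: the %s template is only formatted when the check fails.
    Preconditions.checkState(id > 0,
        "Container ID should be a positive long. %s", id);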
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/992eea51/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
index cf20f39..8ad1c8b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
@@ -17,6 +17,6 @@
  */
 
 /**
- * Container States management package.
+ * Container States package.
  */
 package org.apache.hadoop.hdds.scm.container.states;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/992eea51/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
index 227df3c..d7d70ef 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
@@ -114,6 +114,8 @@ public class SCMException extends IOException {
     FAILED_TO_FIND_BLOCK,
     IO_EXCEPTION,
     UNEXPECTED_CONTAINER_STATE,
-    SCM_NOT_INITIALIZED
+    SCM_NOT_INITIALIZED,
+    DUPLICATE_DATANODE,
+    NO_SUCH_DATANODE
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/992eea51/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
new file mode 100644
index 0000000..f850e7a
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.node.states;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE;
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.NO_SUCH_DATANODE;
+
+/**
+ * This data structure maintains the list of containers that are on a datanode.
+ * This information is built from the DN container reports.
+ */
+public class Node2ContainerMap {
+  private final Map<UUID, Set<ContainerID>> dn2ContainerMap;
+
+  /**
+   * Constructs a Node2ContainerMap Object.
+   */
+  public Node2ContainerMap() {
+    dn2ContainerMap = new ConcurrentHashMap<>();
+  }
+
+  /**
+   * Returns true if this a datanode that is already tracked by
+   * Node2ContainerMap.
+   *
+   * @param datanodeID - UUID of the Datanode.
+   * @return True if this is tracked, false if this map does not know about it.
+   */
+  public boolean isKnownDatanode(UUID datanodeID) {
+    Preconditions.checkNotNull(datanodeID);
+    return dn2ContainerMap.containsKey(datanodeID);
+  }
+
+  /**
+   * Insert a new datanode into Node2Container Map.
+   *
+   * @param datanodeID -- Datanode UUID
+   * @param containerIDs - List of ContainerIDs.
+   */
+  public void insertNewDatanode(UUID datanodeID, Set<ContainerID> containerIDs)
+      throws SCMException {
+    Preconditions.checkNotNull(containerIDs);
+    Preconditions.checkNotNull(datanodeID);
+    if(dn2ContainerMap.putIfAbsent(datanodeID, containerIDs) != null) {
+      throw new SCMException("Node already exists in the map",
+                  DUPLICATE_DATANODE);
+    }
+  }
+
+  /**
+   * Updates the Container list of an existing DN.
+   *
+   * @param datanodeID - UUID of DN.
+   * @param containers - Set of Containers that are present on the DN.
+   * @throws SCMException - if we don't know about this datanode, for new DN
+   *                      use insertNewDatanode.
+   */
+  public void updateDatanodeMap(UUID datanodeID, Set<ContainerID> containers)
+      throws SCMException {
+    Preconditions.checkNotNull(datanodeID);
+    Preconditions.checkNotNull(containers);
+    if (dn2ContainerMap.computeIfPresent(datanodeID,
+        (k, v) -> containers) == null) {
+      throw new SCMException("No such datanode", NO_SUCH_DATANODE);
+    }
+  }
+
+  /**
+   * Removes datanode Entry from the map
+   * @param datanodeID - Datanode ID.
+   */
+  public void removeDatanode(UUID datanodeID) {
+    Preconditions.checkNotNull(datanodeID);
+    dn2ContainerMap.computeIfPresent(datanodeID, (k, v) -> null);
+  }
+
+  /**
+   * Returns null if there no containers associated with this datanode ID.
+   *
+   * @param datanode - UUID
+   * @return Set of containers or Null.
+   */
+  public Set<ContainerID> getContainers(UUID datanode) {
+    Preconditions.checkNotNull(datanode);
+    return dn2ContainerMap.computeIfPresent(datanode, (k, v) ->
+        Collections.unmodifiableSet(v));
+  }
+
+  public ReportResult processReport(UUID datanodeID, Set<ContainerID>
+      containers) {
+    Preconditions.checkNotNull(datanodeID);
+    Preconditions.checkNotNull(containers);
+
+    if (!isKnownDatanode(datanodeID)) {
+      return ReportResult.ReportResultBuilder.newBuilder()
+          .setStatus(ReportStatus.NEW_DATANODE_FOUND)
+          .setNewContainers(containers)
+          .build();
+    }
+
+    // Edge cases such as zero-length container sets are handled by removeAll.
+    Set<ContainerID> currentSet = dn2ContainerMap.get(datanodeID);
+    TreeSet<ContainerID> newContainers = new TreeSet<>(containers);
+    newContainers.removeAll(currentSet);
+
+    TreeSet<ContainerID> missingContainers = new TreeSet<>(currentSet);
+    missingContainers.removeAll(containers);
+
+    if (newContainers.isEmpty() && missingContainers.isEmpty()) {
+      return ReportResult.ReportResultBuilder.newBuilder()
+          .setStatus(ReportStatus.ALL_IS_WELL)
+          .build();
+    }
+
+    if (newContainers.isEmpty() && !missingContainers.isEmpty()) {
+      return ReportResult.ReportResultBuilder.newBuilder()
+          .setStatus(ReportStatus.MISSING_CONTAINERS)
+          .setMissingContainers(missingContainers)
+          .build();
+    }
+
+    if (!newContainers.isEmpty() && missingContainers.isEmpty()) {
+      return ReportResult.ReportResultBuilder.newBuilder()
+          .setStatus(ReportStatus.NEW_CONTAINERS_FOUND)
+          .setNewContainers(newContainers)
+          .build();
+    }
+
+    if (!newContainers.isEmpty() && !missingContainers.isEmpty()) {
+      return ReportResult.ReportResultBuilder.newBuilder()
+          .setStatus(ReportStatus.MISSING_AND_NEW_CONTAINERS_FOUND)
+          .setNewContainers(newContainers)
+          .setMissingContainers(missingContainers)
+          .build();
+    }
+
+    // Default status; unreachable, but keeps the compiler happy.
+    return ReportResult.ReportResultBuilder.newBuilder()
+        .setStatus(ReportStatus.ALL_IS_WELL)
+        .build();
+  }
+
+
+
+
+
+  /**
+   * Results possible from processing a container report by
+   * Node2ContainerMapper.
+   */
+  public enum ReportStatus {
+    ALL_IS_WELL,
+    MISSING_CONTAINERS,
+    NEW_CONTAINERS_FOUND,
+    MISSING_AND_NEW_CONTAINERS_FOUND,
+    NEW_DATANODE_FOUND
+  }
+}
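
Taken together, the class supports a simple report-processing loop. A usage sketch, assuming the caller sits in the same package with the imports from the file above (the container IDs are made up for illustration):

    Node2ContainerMap map = new Node2ContainerMap();
    UUID datanode = UUID.randomUUID();
    Set<ContainerID> reported = new TreeSet<>(
        Arrays.asList(new ContainerID(1), new ContainerID(2)));

    // First report from an untracked node: caller is told to register it.
    ReportResult first = map.processReport(datanode, reported);
    // first.getStatus() == ReportStatus.NEW_DATANODE_FOUND
    map.insertNewDatanode(datanode, reported);  // SCMException on duplicates

    // A later report with one extra container is flagged accordingly.
    Set<ContainerID> next = new TreeSet<>(reported);
    next.add(new ContainerID(3));
    ReportResult second = map.processReport(datanode, next);
    // second.getStatus() == ReportStatus.NEW_CONTAINERS_FOUND,
    // second.getNewContainers() holds the single new ContainerID(3)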

http://git-wip-us.apache.org/repos/asf/hadoop/blob/992eea51/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
new file mode 100644
index 0000000..cb06cb3
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.node.states;
+
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+
+import java.util.Set;
+
+/**
+ * A container report gets processed by the Node2ContainerMap and the
+ * result is returned as a ReportResult.
+ */
+public class ReportResult {
+  private Node2ContainerMap.ReportStatus status;
+  private Set<ContainerID> missingContainers;
+  private Set<ContainerID> newContainers;
+
+  ReportResult(Node2ContainerMap.ReportStatus status,
+      Set<ContainerID> missingContainers,
+      Set<ContainerID> newContainers) {
+    this.status = status;
+    this.missingContainers = missingContainers;
+    this.newContainers = newContainers;
+  }
+
+  public Node2ContainerMap.ReportStatus getStatus() {
+    return status;
+  }
+
+  public Set<ContainerID> getMissingContainers() {
+    return missingContainers;
+  }
+
+  public Set<ContainerID> getNewContainers() {
+    return newContainers;
+  }
+
+  static class ReportResultBuilder {
+    private Node2ContainerMap.ReportStatus status;
+    private Set<ContainerID> missingContainers;
+    private Set<ContainerID> newContainers;
+
+    static ReportResultBuilder newBuilder() {
+      return new ReportResultBuilder();
+    }
+
+    public ReportResultBuilder setStatus(
+        Node2ContainerMap.ReportStatus newstatus) {
+      this.status = newstatus;
+      return this;
+    }
+
+    public ReportResultBuilder setMissingContainers(
+        Set<ContainerID> missingContainersList) {
+      this.missingContainers = missingContainersList;
+      return this;
+    }
+
+    public ReportResultBuilder setNewContainers(
+        Set<ContainerID> newContainersList) {
+      this.newContainers = newContainersList;
+      return this;
+    }
+
+    ReportResult build() {
+      return new ReportResult(status, missingContainers, newContainers);
+    }
+  }
+}
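
Since newBuilder() and build() are package-private, results are only minted inside org.apache.hadoop.hdds.scm.node.states; a hand-built result (as processReport produces internally) would look like this sketch, where `missing` stands in for a Set<ContainerID> computed earlier:

    ReportResult result = ReportResult.ReportResultBuilder.newBuilder()
        .setStatus(Node2ContainerMap.ReportStatus.MISSING_CONTAINERS)
        .setMissingContainers(missing)
        .build();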

http://git-wip-us.apache.org/repos/asf/hadoop/blob/992eea51/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
new file mode 100644
index 0000000..c429c5c
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+/**
+ * Node States package.
+ */
+package org.apache.hadoop.hdds.scm.node.states;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/992eea51/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMapTest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMapTest.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMapTest.java
new file mode 100644
index 0000000..79f1b40
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMapTest.java
@@ -0,0 +1,308 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.node.states;
+
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Test classes for Node2ContainerMap.
+ */
+public class Node2ContainerMapTest {
+  private final static int DATANODE_COUNT = 300;
+  private final static int CONTAINER_COUNT = 1000;
+  private final Map<UUID, TreeSet<ContainerID>> testData = new
+      ConcurrentHashMap<>();
+
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  private void generateData() {
+    for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
+      TreeSet<ContainerID> currentSet = new TreeSet<>();
+      for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) {
+        long currentCnIndex = (dnIndex * CONTAINER_COUNT) + cnIndex;
+        currentSet.add(new ContainerID(currentCnIndex));
+      }
+      testData.put(UUID.randomUUID(), currentSet);
+    }
+  }
+
+  private UUID getFirstKey() {
+    return testData.keySet().iterator().next();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    generateData();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+  }
+
+  @Test
+  public void testIsKnownDatanode() throws SCMException {
+    Node2ContainerMap map = new Node2ContainerMap();
+    UUID knownNode = getFirstKey();
+    UUID unknownNode = UUID.randomUUID();
+    Set<ContainerID> containerIDs = testData.get(knownNode);
+    map.insertNewDatanode(knownNode, containerIDs);
+    Assert.assertTrue("Not able to detect a known node",
+        map.isKnownDatanode(knownNode));
+    Assert.assertFalse("Unknown node detected",
+        map.isKnownDatanode(unknownNode));
+  }
+
+  @Test
+  public void testInsertNewDatanode() throws SCMException {
+    Node2ContainerMap map = new Node2ContainerMap();
+    UUID knownNode = getFirstKey();
+    Set<ContainerID> containerIDs = testData.get(knownNode);
+    map.insertNewDatanode(knownNode, containerIDs);
+    Set<ContainerID> readSet = map.getContainers(knownNode);
+
+    // Assert that all elements are present in the set that we read back from
+    // node map.
+    Set<ContainerID> newSet = new TreeSet<>(readSet);
+    Assert.assertTrue(newSet.removeAll(containerIDs));
+    Assert.assertEquals(0, newSet.size());
+
+    thrown.expect(SCMException.class);
+    thrown.expectMessage("already exists");
+    map.insertNewDatanode(knownNode, containerIDs);
+
+    map.removeDatanode(knownNode);
+    map.insertNewDatanode(knownNode, containerIDs);
+
+  }
+
+  @Test
+  public void testProcessReportCheckOneNode() throws SCMException {
+    UUID key = getFirstKey();
+    Set<ContainerID> values = testData.get(key);
+    Node2ContainerMap map = new Node2ContainerMap();
+    map.insertNewDatanode(key, values);
+    Assert.assertTrue(map.isKnownDatanode(key));
+    ReportResult result = map.processReport(key, values);
+    Assert.assertEquals(result.getStatus(),
+        Node2ContainerMap.ReportStatus.ALL_IS_WELL);
+  }
+
+  @Test
+  public void testProcessReportInsertAll() throws SCMException {
+    Node2ContainerMap map = new Node2ContainerMap();
+
+    for (Map.Entry<UUID, TreeSet<ContainerID>> keyEntry : testData.entrySet()) {
+      map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue());
+    }
+    // Assert all Keys are known datanodes.
+    for (UUID key : testData.keySet()) {
+      Assert.assertTrue(map.isKnownDatanode(key));
+    }
+  }
+
+  /*
+  For processReport we have to test the following scenarios:
+
+  1. New Datanode - A new datanode appears and we have to add it to the
+  SCM's Node2Container Map.
+
+  2. New Container - A Datanode exists, but a new container is added to that
+  DN. We need to detect that and return a list of added containers.
+
+  3. Missing Container - A Datanode exists, but one of the expected containers
+  on that datanode is missing. We need to detect that.
+
+  4. We get a container report that has both missing and new containers.
+  We need to return separate lists for these.
+   */
+
+  /**
+   * Assert that we are able to detect the addition of a new datanode.
+   *
+   * @throws SCMException
+   */
+  @Test
+  public void testProcessReportDetectNewDataNode() throws SCMException {
+    Node2ContainerMap map = new Node2ContainerMap();
+    // If we attempt to process a node that is not present in the map,
+    // we get a result back that says NEW_DATANODE_FOUND.
+    UUID key = getFirstKey();
+    TreeSet<ContainerID> values = testData.get(key);
+    ReportResult result = map.processReport(key, values);
+    Assert.assertEquals(Node2ContainerMap.ReportStatus.NEW_DATANODE_FOUND,
+        result.getStatus());
+    Assert.assertEquals(result.getNewContainers().size(), values.size());
+  }
+
+  /**
+   * This test asserts that processReport is able to detect new containers
+   * when it is added to a datanode. For that we populate the DN with a list
+   * of containerIDs, then add a few more containers and make sure that we
+   * are able to detect them.
+   *
+   * @throws SCMException
+   */
+  @Test
+  public void testProcessReportDetectNewContainers() throws SCMException {
+    Node2ContainerMap map = new Node2ContainerMap();
+    UUID key = getFirstKey();
+    TreeSet<ContainerID> values = testData.get(key);
+    map.insertNewDatanode(key, values);
+
+    final int newCount = 100;
+    // This is not a mistake, the treeset seems to be reverse sorted.
+    ContainerID last = values.pollFirst();
+    TreeSet<ContainerID> addedContainers = new TreeSet<>();
+    for (int x = 1; x <= newCount; x++) {
+      long cTemp = last.getId() + x;
+      addedContainers.add(new ContainerID(cTemp));
+    }
+
+    // This set is the super set of existing containers and new containers.
+    TreeSet<ContainerID> newContainersSet = new TreeSet<>(values);
+    newContainersSet.addAll(addedContainers);
+
+    ReportResult result = map.processReport(key, newContainersSet);
+
+    // Assert that the new containers were detected and counted correctly.
+    Assert.assertEquals(Node2ContainerMap.ReportStatus.NEW_CONTAINERS_FOUND,
+        result.getStatus());
+
+    Assert.assertEquals(addedContainers.size(),
+        result.getNewContainers().size());
+
+    // Assert that the container IDs match the ones we added.
+    Assert.assertTrue("All objects are not removed.",
+        result.getNewContainers().removeAll(addedContainers));
+  }
+
+  /**
+   * This test asserts that processReport is able to detect missing containers
+   * if they are missing from a report.
+   *
+   * @throws SCMException
+   */
+  @Test
+  public void testProcessReportDetectMissingContainers() throws SCMException {
+    Node2ContainerMap map = new Node2ContainerMap();
+    UUID key = getFirstKey();
+    TreeSet<ContainerID> values = testData.get(key);
+    map.insertNewDatanode(key, values);
+
+    final int removeCount = 100;
+    Random r = new Random();
+
+    ContainerID first = values.pollLast();
+    TreeSet<ContainerID> removedContainers = new TreeSet<>();
+
+    // Pick random containers to remove; collisions are fine.
+    for (int x = 0; x < removeCount; x++) {
+      int startBase = (int) first.getId();
+      long cTemp = r.nextInt(values.size());
+      removedContainers.add(new ContainerID(cTemp + startBase));
+    }
+
+    // This set is a new set with some containers removed.
+    TreeSet<ContainerID> newContainersSet = new TreeSet<>(values);
+    newContainersSet.removeAll(removedContainers);
+
+    ReportResult result = map.processReport(key, newContainersSet);
+
+
+    // Assert that the missing containers were detected and counted correctly.
+    Assert.assertEquals(Node2ContainerMap.ReportStatus.MISSING_CONTAINERS,
+        result.getStatus());
+    Assert.assertEquals(removedContainers.size(),
+        result.getMissingContainers().size());
+
+    // Assert that the container IDs match the ones we removed.
+    Assert.assertTrue("All missing containers not found.",
+        result.getMissingContainers().removeAll(removedContainers));
+  }
+
+  @Test
+  public void testProcessReportDetectNewAndMissingContainers() throws
+      SCMException {
+    Node2ContainerMap map = new Node2ContainerMap();
+    UUID key = getFirstKey();
+    TreeSet<ContainerID> values = testData.get(key);
+    map.insertNewDatanode(key, values);
+
+    Set<ContainerID> insertedSet = new TreeSet<>();
+    // Insert container IDs 1..30 as new containers.
+    for (int x = 1; x <= 30; x++) {
+      insertedSet.add(new ContainerID(x));
+    }
+
+
+    final int removeCount = 100;
+    Random r = new Random();
+
+    ContainerID first = values.pollLast();
+    TreeSet<ContainerID> removedContainers = new TreeSet<>();
+
+    // Pick random containers to remove; collisions are fine.
+    for (int x = 0; x < removeCount; x++) {
+      int startBase = (int) first.getId();
+      long cTemp = r.nextInt(values.size());
+      removedContainers.add(new ContainerID(cTemp + startBase));
+    }
+
+    Set<ContainerID> newSet = new TreeSet<>(values);
+    newSet.addAll(insertedSet);
+    newSet.removeAll(removedContainers);
+
+    ReportResult result = map.processReport(key, newSet);
+
+
+    Assert.assertEquals(
+        Node2ContainerMap.ReportStatus.MISSING_AND_NEW_CONTAINERS_FOUND,
+        result.getStatus());
+    Assert.assertEquals(removedContainers.size(),
+        result.getMissingContainers().size());
+
+
+    // Assert that the missing container IDs match the ones we removed.
+    Assert.assertTrue("All missing containers not found.",
+        result.getMissingContainers().removeAll(removedContainers));
+
+    Assert.assertEquals(insertedSet.size(),
+        result.getNewContainers().size());
+
+    // Assert that the new container IDs match the ones we inserted.
+    Assert.assertTrue("All inserted containers are not found.",
+        result.getNewContainers().removeAll(insertedSet));
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/992eea51/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
new file mode 100644
index 0000000..6610fcd
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/**
+ * Test Node2Container Map.
+ */
+package org.apache.hadoop.hdds.scm.node.states;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/992eea51/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
index 090f1a7..8ba19fc 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.genesis;
 
+import org.apache.hadoop.conf.StorageUnit;
 import org.openjdk.jmh.infra.BenchmarkParams;
 import org.openjdk.jmh.infra.IterationParams;
 import org.openjdk.jmh.profile.InternalProfiler;
@@ -46,7 +47,8 @@ public class GenesisMemoryProfiler implements InternalProfiler {
     long totalHeap = Runtime.getRuntime().totalMemory();
 
     Collection<ScalarResult> samples = new ArrayList<>();
-    samples.add(new ScalarResult("Max heap", totalHeap, "bytes",
+    samples.add(new ScalarResult("Max heap",
+        StorageUnit.BYTES.toGBs(totalHeap), "GBs",
         AggregationPolicy.MAX));
     return samples;
   }
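
The profiler now reports the heap figure in GBs rather than raw bytes; the conversion itself is a one-liner via the org.apache.hadoop.conf.StorageUnit enum imported above (a hedged sketch, values for illustration):

    long totalHeap = Runtime.getRuntime().totalMemory();   // bytes
    double inGBs = StorageUnit.BYTES.toGBs(totalHeap);     // 4294967296 -> 4.0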




[38/50] [abbrv] hadoop git commit: YARN-8249. Fixed few REST APIs in RMWebServices to have static-user check. Contributed by Sunil Govindan.

Posted by xy...@apache.org.
YARN-8249. Fixed few REST APIs in RMWebServices to have static-user check. Contributed by Sunil Govindan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/082bcd4a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/082bcd4a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/082bcd4a

Branch: refs/heads/HDDS-4
Commit: 082bcd4a3a5aa2f7f3b7ce6806527cde0407c825
Parents: e9a1ed7
Author: Vinod Kumar Vavilapalli (I am also known as @tshooter.) <vi...@apache.org>
Authored: Thu May 10 19:03:23 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../webapp/AHSWebServices.java                  |  18 +-
 .../hadoop/yarn/server/webapp/WebServices.java  |   2 +-
 .../resourcemanager/webapp/RMWebServices.java   | 302 ++++++++-----------
 .../webapp/TestRMWebServices.java               |   2 +-
 ...tRMWebServicesHttpStaticUserPermissions.java |  12 +-
 5 files changed, 142 insertions(+), 194 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/082bcd4a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index 755127b..9aa71a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -111,7 +111,7 @@ public class AHSWebServices extends WebServices {
   public TimelineAbout about(
       @Context HttpServletRequest req,
       @Context HttpServletResponse res) {
-    init(res);
+    initForReadableEndpoints(res);
     return TimelineUtils.createTimelineAbout("Generic History Service API");
   }
 
@@ -141,7 +141,7 @@ public class AHSWebServices extends WebServices {
       @QueryParam("finishedTimeBegin") String finishBegin,
       @QueryParam("finishedTimeEnd") String finishEnd,
       @QueryParam("applicationTypes") Set<String> applicationTypes) {
-    init(res);
+    initForReadableEndpoints(res);
     validateStates(stateQuery, statesQuery);
     return super.getApps(req, res, stateQuery, statesQuery, finalStatusQuery,
       userQuery, queueQuery, count, startedBegin, startedEnd, finishBegin,
@@ -155,7 +155,7 @@ public class AHSWebServices extends WebServices {
   @Override
   public AppInfo getApp(@Context HttpServletRequest req,
       @Context HttpServletResponse res, @PathParam("appid") String appId) {
-    init(res);
+    initForReadableEndpoints(res);
     return super.getApp(req, res, appId);
   }
 
@@ -166,7 +166,7 @@ public class AHSWebServices extends WebServices {
   @Override
   public AppAttemptsInfo getAppAttempts(@Context HttpServletRequest req,
       @Context HttpServletResponse res, @PathParam("appid") String appId) {
-    init(res);
+    initForReadableEndpoints(res);
     return super.getAppAttempts(req, res, appId);
   }
 
@@ -178,7 +178,7 @@ public class AHSWebServices extends WebServices {
   public AppAttemptInfo getAppAttempt(@Context HttpServletRequest req,
       @Context HttpServletResponse res, @PathParam("appid") String appId,
       @PathParam("appattemptid") String appAttemptId) {
-    init(res);
+    initForReadableEndpoints(res);
     return super.getAppAttempt(req, res, appId, appAttemptId);
   }
 
@@ -190,7 +190,7 @@ public class AHSWebServices extends WebServices {
   public ContainersInfo getContainers(@Context HttpServletRequest req,
       @Context HttpServletResponse res, @PathParam("appid") String appId,
       @PathParam("appattemptid") String appAttemptId) {
-    init(res);
+    initForReadableEndpoints(res);
     return super.getContainers(req, res, appId, appAttemptId);
   }
 
@@ -203,7 +203,7 @@ public class AHSWebServices extends WebServices {
       @Context HttpServletResponse res, @PathParam("appid") String appId,
       @PathParam("appattemptid") String appAttemptId,
       @PathParam("containerid") String containerId) {
-    init(res);
+    initForReadableEndpoints(res);
     return super.getContainer(req, res, appId, appAttemptId, containerId);
   }
 
@@ -257,7 +257,7 @@ public class AHSWebServices extends WebServices {
       @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE)
       @DefaultValue("false") boolean redirected_from_node) {
     ContainerId containerId = null;
-    init(res);
+    initForReadableEndpoints(res);
     try {
       containerId = ContainerId.fromString(containerIdStr);
     } catch (IllegalArgumentException e) {
@@ -392,7 +392,7 @@ public class AHSWebServices extends WebServices {
       @QueryParam(YarnWebServiceParams.NM_ID) String nmId,
       @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE)
       @DefaultValue("false") boolean redirected_from_node) {
-    init(res);
+    initForReadableEndpoints(res);
     ContainerId containerId;
     try {
       containerId = ContainerId.fromString(containerIdStr);
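
The init(res) to initForReadableEndpoints(res) rename makes the read-only contract of these history endpoints explicit. In RMWebServices (further down), the same commit introduces a writable counterpart that authenticates the caller before any mutation. The resulting convention, sketched (the UGI helper name comes from the surrounding class, not this diff):

    // Read-only endpoint: only response setup, no identity checks.
    initForReadableEndpoints();

    // Writable endpoint: reject unauthenticated callers, the static user,
    // and (when requested) non-admins before doing any work.
    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
    initForWritableEndpoints(callerUGI, true);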

http://git-wip-us.apache.org/repos/asf/hadoop/blob/082bcd4a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
index 1399099..df4656f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
@@ -387,7 +387,7 @@ public class WebServices {
     return new ContainerInfo(container);
   }
 
-  protected void init(HttpServletResponse response) {
+  protected void initForReadableEndpoints(HttpServletResponse response) {
     // clear content type
     response.setContentType(null);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/082bcd4a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 0564b67..69c9562 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -272,9 +272,49 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
     return true;
   }
 
-  private void init() {
+  /**
+   * initForReadableEndpoints does the init for all readable REST end points.
+   */
+  private void initForReadableEndpoints() {
+    // clear content type
+    response.setContentType(null);
+  }
+
+  /**
+   * initForWritableEndpoints does the init and ACL verification for all
+   * writable REST end points.
+   *
+   * @param callerUGI
+   *          remote caller who initiated the request
+   * @param doAdminACLsCheck
+   *          boolean flag to indicate whether ACLs check is needed
+   * @throws AuthorizationException
+   *           in case of no access to perform this operation.
+   */
+  private void initForWritableEndpoints(UserGroupInformation callerUGI,
+      boolean doAdminACLsCheck) throws AuthorizationException {
     // clear content type
     response.setContentType(null);
+
+    if (callerUGI == null) {
+      String msg = "Unable to obtain user name, user not authenticated";
+      throw new AuthorizationException(msg);
+    }
+
+    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
+      String msg = "The default static user cannot carry out this operation.";
+      throw new ForbiddenException(msg);
+    }
+
+    if (doAdminACLsCheck) {
+      ApplicationACLsManager aclsManager = rm.getApplicationACLsManager();
+      if (aclsManager.areACLsEnabled()) {
+        if (!aclsManager.isAdmin(callerUGI)) {
+          String msg = "Only admins can carry out this operation.";
+          throw new ForbiddenException(msg);
+        }
+      }
+    }
   }
 
   @GET
@@ -291,7 +331,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Override
   public ClusterInfo getClusterInfo() {
-    init();
+    initForReadableEndpoints();
     return new ClusterInfo(this.rm);
   }
 
@@ -301,7 +341,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Override
   public ClusterMetricsInfo getClusterMetricsInfo() {
-    init();
+    initForReadableEndpoints();
     return new ClusterMetricsInfo(this.rm);
   }
 
@@ -311,7 +351,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Override
   public SchedulerTypeInfo getSchedulerInfo() {
-    init();
+    initForReadableEndpoints();
+
     ResourceScheduler rs = rm.getResourceScheduler();
     SchedulerInfo sinfo;
     if (rs instanceof CapacityScheduler) {
@@ -336,15 +377,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   @Override
   public String dumpSchedulerLogs(@FormParam(RMWSConsts.TIME) String time,
       @Context HttpServletRequest hsr) throws IOException {
-    init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    ApplicationACLsManager aclsManager = rm.getApplicationACLsManager();
-    if (aclsManager.areACLsEnabled()) {
-      if (callerUGI == null || !aclsManager.isAdmin(callerUGI)) {
-        String msg = "Only admins can carry out this operation.";
-        throw new ForbiddenException(msg);
-      }
-    }
+    initForWritableEndpoints(callerUGI, true);
+
     ResourceScheduler rs = rm.getResourceScheduler();
     int period = Integer.parseInt(time);
     if (period <= 0) {
@@ -370,7 +405,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Override
   public NodesInfo getNodes(@QueryParam(RMWSConsts.STATES) String states) {
-    init();
+    initForReadableEndpoints();
+
     ResourceScheduler sched = this.rm.getResourceScheduler();
     if (sched == null) {
       throw new NotFoundException("Null ResourceScheduler instance");
@@ -409,7 +445,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Override
   public NodeInfo getNode(@PathParam(RMWSConsts.NODEID) String nodeId) {
-    init();
+    initForReadableEndpoints();
+
     if (nodeId == null || nodeId.isEmpty()) {
       throw new NotFoundException("nodeId, " + nodeId + ", is empty or null");
     }
@@ -453,6 +490,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @QueryParam(RMWSConsts.APPLICATION_TYPES) Set<String> applicationTypes,
       @QueryParam(RMWSConsts.APPLICATION_TAGS) Set<String> applicationTags,
       @QueryParam(RMWSConsts.DESELECTS) Set<String> unselectedFields) {
+
+    initForReadableEndpoints();
+
     boolean checkCount = false;
     boolean checkStart = false;
     boolean checkEnd = false;
@@ -467,7 +507,6 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
     long fBegin = 0;
     long fEnd = Long.MAX_VALUE;
 
-    init();
     if (count != null && !count.isEmpty()) {
       checkCount = true;
       countNum = Long.parseLong(count);
@@ -633,8 +672,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   @Override
   public ActivitiesInfo getActivities(@Context HttpServletRequest hsr,
       @QueryParam(RMWSConsts.NODEID) String nodeId) {
-    YarnScheduler scheduler = rm.getRMContext().getScheduler();
+    initForReadableEndpoints();
 
+    YarnScheduler scheduler = rm.getRMContext().getScheduler();
     if (scheduler instanceof AbstractYarnScheduler) {
       String errMessage = "";
 
@@ -706,8 +746,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   public AppActivitiesInfo getAppActivities(@Context HttpServletRequest hsr,
       @QueryParam(RMWSConsts.APP_ID) String appId,
       @QueryParam(RMWSConsts.MAX_TIME) String time) {
-    YarnScheduler scheduler = rm.getRMContext().getScheduler();
+    initForReadableEndpoints();
 
+    YarnScheduler scheduler = rm.getRMContext().getScheduler();
     if (scheduler instanceof AbstractYarnScheduler) {
       AbstractYarnScheduler abstractYarnScheduler =
           (AbstractYarnScheduler) scheduler;
@@ -760,7 +801,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @Context HttpServletRequest hsr,
       @QueryParam(RMWSConsts.STATES) Set<String> stateQueries,
       @QueryParam(RMWSConsts.APPLICATION_TYPES) Set<String> typeQueries) {
-    init();
+    initForReadableEndpoints();
 
     // parse the params and build the scoreboard
     // converting state/type name to lowercase
@@ -847,7 +888,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   public AppInfo getApp(@Context HttpServletRequest hsr,
       @PathParam(RMWSConsts.APPID) String appId,
       @QueryParam(RMWSConsts.DESELECTS) Set<String> unselectedFields) {
-    init();
+    initForReadableEndpoints();
+
     ApplicationId id = WebAppUtils.parseApplicationId(recordFactory, appId);
     RMApp app = rm.getRMContext().getRMApps().get(id);
     if (app == null) {
@@ -868,8 +910,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   @Override
   public AppAttemptsInfo getAppAttempts(@Context HttpServletRequest hsr,
       @PathParam(RMWSConsts.APPID) String appId) {
+    initForReadableEndpoints();
 
-    init();
     ApplicationId id = WebAppUtils.parseApplicationId(recordFactory, appId);
     RMApp app = rm.getRMContext().getRMApps().get(id);
     if (app == null) {
@@ -895,7 +937,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @Context HttpServletRequest req, @Context HttpServletResponse res,
       @PathParam(RMWSConsts.APPID) String appId,
       @PathParam(RMWSConsts.APPATTEMPTID) String appAttemptId) {
-    init(res);
+    initForReadableEndpoints(res);
     return super.getAppAttempt(req, res, appId, appAttemptId);
   }
 
@@ -908,7 +950,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @Context HttpServletResponse res,
       @PathParam(RMWSConsts.APPID) String appId,
       @PathParam(RMWSConsts.APPATTEMPTID) String appAttemptId) {
-    init(res);
+    initForReadableEndpoints(res);
     return super.getContainers(req, res, appId, appAttemptId);
   }
 
@@ -922,7 +964,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @PathParam(RMWSConsts.APPID) String appId,
       @PathParam(RMWSConsts.APPATTEMPTID) String appAttemptId,
       @PathParam("containerid") String containerId) {
-    init(res);
+    initForReadableEndpoints(res);
     return super.getContainer(req, res, appId, appAttemptId, containerId);
   }
 
@@ -933,7 +975,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   @Override
   public AppState getAppState(@Context HttpServletRequest hsr,
       @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException {
-    init();
+    initForReadableEndpoints();
+
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     String userName = "";
     if (callerUGI != null) {
@@ -969,18 +1012,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @Context HttpServletRequest hsr,
       @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException,
       YarnException, InterruptedException, IOException {
-
-    init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    if (callerUGI == null) {
-      String msg = "Unable to obtain user name, user not authenticated";
-      throw new AuthorizationException(msg);
-    }
-
-    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
-      String msg = "The default static user cannot carry out this operation.";
-      return Response.status(Status.FORBIDDEN).entity(msg).build();
-    }
+    initForWritableEndpoints(callerUGI, false);
 
     String userName = callerUGI.getUserName();
     RMApp app = null;
@@ -1019,7 +1052,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   @Override
   public NodeToLabelsInfo getNodeToLabels(@Context HttpServletRequest hsr)
       throws IOException {
-    init();
+    initForReadableEndpoints();
 
     NodeToLabelsInfo ntl = new NodeToLabelsInfo();
     HashMap<String, NodeLabelsInfo> ntlMap = ntl.getNodeToLabels();
@@ -1041,7 +1074,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   @Override
   public LabelsToNodesInfo getLabelsToNodes(
       @QueryParam(RMWSConsts.LABELS) Set<String> labels) throws IOException {
-    init();
+    initForReadableEndpoints();
 
     LabelsToNodesInfo lts = new LabelsToNodesInfo();
     Map<NodeLabelInfo, NodeIDsInfo> ltsMap = lts.getLabelsToNodes();
@@ -1073,6 +1106,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   public Response replaceLabelsOnNodes(
       final NodeToLabelsEntryList newNodeToLabels,
       @Context HttpServletRequest hsr) throws IOException {
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+    initForWritableEndpoints(callerUGI, false);
+
     Map<NodeId, Set<String>> nodeIdToLabels =
         new HashMap<NodeId, Set<String>>();
 
@@ -1094,6 +1130,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @QueryParam("labels") Set<String> newNodeLabelsName,
       @Context HttpServletRequest hsr, @PathParam("nodeId") String nodeId)
       throws Exception {
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+    initForWritableEndpoints(callerUGI, false);
+
     NodeId nid = ConverterUtils.toNodeIdWithDefaultPort(nodeId);
     Map<NodeId, Set<String>> newLabelsForNode =
         new HashMap<NodeId, Set<String>>();
@@ -1106,7 +1145,6 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   private Response replaceLabelsOnNode(
       Map<NodeId, Set<String>> newLabelsForNode, HttpServletRequest hsr,
       String operation) throws IOException {
-    init();
 
     NodeLabelsUtils.verifyCentralizedNodeLabelConfEnabled("replaceLabelsOnNode",
         isCentralizedNodeLabelConfiguration);
@@ -1140,7 +1178,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   @Override
   public NodeLabelsInfo getClusterNodeLabels(@Context HttpServletRequest hsr)
       throws IOException {
-    init();
+    initForReadableEndpoints();
 
     List<NodeLabel> nodeLabels =
         rm.getRMContext().getNodeLabelManager().getClusterNodeLabels();
@@ -1156,14 +1194,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   @Override
   public Response addToClusterNodeLabels(final NodeLabelsInfo newNodeLabels,
       @Context HttpServletRequest hsr) throws Exception {
-    init();
-
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    if (callerUGI == null) {
-      String msg = "Unable to obtain user name, user not authenticated for"
-          + " post to .../add-node-labels";
-      throw new AuthorizationException(msg);
-    }
+    initForWritableEndpoints(callerUGI, false);
+
     if (!rm.getRMContext().getNodeLabelManager().checkAccess(callerUGI)) {
       String msg = "User " + callerUGI.getShortUserName() + " not authorized"
           + " for post to .../add-node-labels ";
@@ -1189,14 +1222,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   public Response removeFromCluserNodeLabels(
       @QueryParam(RMWSConsts.LABELS) Set<String> oldNodeLabels,
       @Context HttpServletRequest hsr) throws Exception {
-    init();
-
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    if (callerUGI == null) {
-      String msg = "Unable to obtain user name, user not authenticated for"
-          + " post to .../remove-node-labels";
-      throw new AuthorizationException(msg);
-    }
+    initForWritableEndpoints(callerUGI, false);
+
     if (!rm.getRMContext().getNodeLabelManager().checkAccess(callerUGI)) {
       String msg = "User " + callerUGI.getShortUserName() + " not authorized"
           + " for post to .../remove-node-labels ";
@@ -1220,7 +1248,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   @Override
   public NodeLabelsInfo getLabelsOnNode(@Context HttpServletRequest hsr,
       @PathParam(RMWSConsts.NODEID) String nodeId) throws IOException {
-    init();
+    initForReadableEndpoints();
 
     NodeId nid = ConverterUtils.toNodeIdWithDefaultPort(nodeId);
     List<NodeLabel> labels = new ArrayList<NodeLabel>(
@@ -1290,7 +1318,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   @Override
   public AppPriority getAppPriority(@Context HttpServletRequest hsr,
       @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException {
-    init();
+    initForReadableEndpoints();
+
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     String userName = "UNKNOWN-USER";
     if (callerUGI != null) {
@@ -1322,21 +1351,11 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @Context HttpServletRequest hsr,
       @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException,
       YarnException, InterruptedException, IOException {
-    init();
-    if (targetPriority == null) {
-      throw new YarnException("Target Priority cannot be null");
-    }
-
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    if (callerUGI == null) {
-      throw new AuthorizationException(
-          "Unable to obtain user name, user not authenticated");
-    }
+    initForWritableEndpoints(callerUGI, false);
 
-    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
-      return Response.status(Status.FORBIDDEN)
-          .entity("The default static user cannot carry out this operation.")
-          .build();
+    if (targetPriority == null) {
+      throw new YarnException("Target Priority cannot be null");
     }
 
     String userName = callerUGI.getUserName();
@@ -1407,7 +1426,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   @Override
   public AppQueue getAppQueue(@Context HttpServletRequest hsr,
       @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException {
-    init();
+    initForReadableEndpoints();
+
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     String userName = "UNKNOWN-USER";
     if (callerUGI != null) {
@@ -1440,17 +1460,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException,
       YarnException, InterruptedException, IOException {
 
-    init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    if (callerUGI == null) {
-      String msg = "Unable to obtain user name, user not authenticated";
-      throw new AuthorizationException(msg);
-    }
-
-    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
-      String msg = "The default static user cannot carry out this operation.";
-      return Response.status(Status.FORBIDDEN).entity(msg).build();
-    }
+    initForWritableEndpoints(callerUGI, false);
 
     String userName = callerUGI.getUserName();
     RMApp app = null;
@@ -1561,16 +1572,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   @Override
   public Response createNewApplication(@Context HttpServletRequest hsr)
       throws AuthorizationException, IOException, InterruptedException {
-    init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    if (callerUGI == null) {
-      throw new AuthorizationException(
-          "Unable to obtain user name, " + "user not authenticated");
-    }
-    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
-      String msg = "The default static user cannot carry out this operation.";
-      return Response.status(Status.FORBIDDEN).entity(msg).build();
-    }
+    initForWritableEndpoints(callerUGI, false);
 
     NewApplication appId = createNewApplication();
     return Response.status(Status.OK).entity(appId).build();
@@ -1590,17 +1593,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @Context HttpServletRequest hsr)
       throws AuthorizationException, IOException, InterruptedException {
 
-    init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    if (callerUGI == null) {
-      throw new AuthorizationException(
-          "Unable to obtain user name, " + "user not authenticated");
-    }
-
-    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
-      String msg = "The default static user cannot carry out this operation.";
-      return Response.status(Status.FORBIDDEN).entity(msg).build();
-    }
+    initForWritableEndpoints(callerUGI, false);
 
     ApplicationSubmissionContext appContext =
         RMWebAppUtil.createAppSubmissionContext(newApp, conf);
@@ -1654,14 +1648,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
     return appId;
   }
 
-  private UserGroupInformation createKerberosUserGroupInformation(
-      HttpServletRequest hsr) throws AuthorizationException, YarnException {
-
-    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    if (callerUGI == null) {
-      String msg = "Unable to obtain user name, user not authenticated";
-      throw new AuthorizationException(msg);
-    }
+  private void createKerberosUserGroupInformation(HttpServletRequest hsr,
+      UserGroupInformation callerUGI)
+      throws AuthorizationException, YarnException {
 
     String authType = hsr.getAuthType();
     if (!KerberosAuthenticationHandler.TYPE.equalsIgnoreCase(authType)) {
@@ -1672,14 +1661,10 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
     }
     if (hsr.getAttribute(
         DelegationTokenAuthenticationHandler.DELEGATION_TOKEN_UGI_ATTRIBUTE) != null) {
-      String msg =
-          "Delegation token operations cannot be carried out using delegation"
-              + " token authentication.";
+      String msg = "Delegation token operations cannot be carried out using "
+          + "delegation token authentication.";
       throw new YarnException(msg);
     }
-
-    callerUGI.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
-    return callerUGI;
   }
 
   @POST
@@ -1692,10 +1677,12 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @Context HttpServletRequest hsr) throws AuthorizationException,
       IOException, InterruptedException, Exception {
 
-    init();
-    UserGroupInformation callerUGI;
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+    initForWritableEndpoints(callerUGI, false);
+
     try {
-      callerUGI = createKerberosUserGroupInformation(hsr);
+      createKerberosUserGroupInformation(hsr, callerUGI);
+      callerUGI.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
     } catch (YarnException ye) {
       return Response.status(Status.FORBIDDEN).entity(ye.getMessage()).build();
     }
@@ -1712,10 +1699,12 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       throws AuthorizationException, IOException, InterruptedException,
       Exception {
 
-    init();
-    UserGroupInformation callerUGI;
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+    initForWritableEndpoints(callerUGI, false);
+
     try {
-      callerUGI = createKerberosUserGroupInformation(hsr);
+      createKerberosUserGroupInformation(hsr, callerUGI);
+      callerUGI.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
     } catch (YarnException ye) {
       return Response.status(Status.FORBIDDEN).entity(ye.getMessage()).build();
     }
@@ -1827,10 +1816,12 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       throws AuthorizationException, IOException, InterruptedException,
       Exception {
 
-    init();
-    UserGroupInformation callerUGI;
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+    initForWritableEndpoints(callerUGI, false);
+
     try {
-      callerUGI = createKerberosUserGroupInformation(hsr);
+      createKerberosUserGroupInformation(hsr, callerUGI);
+      callerUGI.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
     } catch (YarnException ye) {
       return Response.status(Status.FORBIDDEN).entity(ye.getMessage()).build();
     }
@@ -1904,16 +1895,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   @Override
   public Response createNewReservation(@Context HttpServletRequest hsr)
       throws AuthorizationException, IOException, InterruptedException {
-    init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    if (callerUGI == null) {
-      throw new AuthorizationException(
-          "Unable to obtain user name, " + "user not authenticated");
-    }
-    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
-      String msg = "The default static user cannot carry out this operation.";
-      return Response.status(Status.FORBIDDEN).entity(msg).build();
-    }
+    initForWritableEndpoints(callerUGI, false);
 
     NewReservation reservationId = createNewReservation();
     return Response.status(Status.OK).entity(reservationId).build();
@@ -1953,16 +1936,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @Context HttpServletRequest hsr)
       throws AuthorizationException, IOException, InterruptedException {
 
-    init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    if (callerUGI == null) {
-      throw new AuthorizationException(
-          "Unable to obtain user name, " + "user not authenticated");
-    }
-    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
-      String msg = "The default static user cannot carry out this operation.";
-      return Response.status(Status.FORBIDDEN).entity(msg).build();
-    }
+    initForWritableEndpoints(callerUGI, false);
 
     final ReservationSubmissionRequest reservation =
         createReservationSubmissionRequest(resContext);
@@ -2051,16 +2026,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @Context HttpServletRequest hsr)
       throws AuthorizationException, IOException, InterruptedException {
 
-    init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    if (callerUGI == null) {
-      throw new AuthorizationException(
-          "Unable to obtain user name, " + "user not authenticated");
-    }
-    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
-      String msg = "The default static user cannot carry out this operation.";
-      return Response.status(Status.FORBIDDEN).entity(msg).build();
-    }
+    initForWritableEndpoints(callerUGI, false);
 
     final ReservationUpdateRequest reservation =
         createReservationUpdateRequest(resContext);
@@ -2150,16 +2117,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @Context HttpServletRequest hsr)
       throws AuthorizationException, IOException, InterruptedException {
 
-    init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    if (callerUGI == null) {
-      throw new AuthorizationException(
-          "Unable to obtain user name, " + "user not authenticated");
-    }
-    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
-      String msg = "The default static user cannot carry out this operation.";
-      return Response.status(Status.FORBIDDEN).entity(msg).build();
-    }
+    initForWritableEndpoints(callerUGI, false);
 
     final ReservationDeleteRequest reservation =
         createReservationDeleteRequest(resContext);
@@ -2207,7 +2166,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @QueryParam(RMWSConsts.END_TIME) @DefaultValue(DEFAULT_END_TIME) long endTime,
       @QueryParam(RMWSConsts.INCLUDE_RESOURCE) @DefaultValue(DEFAULT_INCLUDE_RESOURCE) boolean includeResourceAllocations,
       @Context HttpServletRequest hsr) throws Exception {
-    init();
+    initForReadableEndpoints();
 
     final ReservationListRequest request = ReservationListRequest.newInstance(
         queue, reservationId, startTime, endTime, includeResourceAllocations);
@@ -2253,7 +2212,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   public AppTimeoutInfo getAppTimeout(@Context HttpServletRequest hsr,
       @PathParam(RMWSConsts.APPID) String appId,
       @PathParam(RMWSConsts.TYPE) String type) throws AuthorizationException {
-    init();
+    initForReadableEndpoints();
     RMApp app = validateAppTimeoutRequest(hsr, appId);
 
     ApplicationTimeoutType appTimeoutType = parseTimeoutType(type);
@@ -2297,7 +2256,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   @Override
   public AppTimeoutsInfo getAppTimeouts(@Context HttpServletRequest hsr,
       @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException {
-    init();
+    initForReadableEndpoints();
 
     RMApp app = validateAppTimeoutRequest(hsr, appId);
 
@@ -2355,19 +2314,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @Context HttpServletRequest hsr,
       @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException,
       YarnException, InterruptedException, IOException {
-    init();
 
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    if (callerUGI == null) {
-      throw new AuthorizationException(
-          "Unable to obtain user name, user not authenticated");
-    }
-
-    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
-      return Response.status(Status.FORBIDDEN)
-          .entity("The default static user cannot carry out this operation.")
-          .build();
-    }
+    initForWritableEndpoints(callerUGI, false);
 
     String userName = callerUGI.getUserName();
     RMApp app = null;
@@ -2480,16 +2429,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   public synchronized Response updateSchedulerConfiguration(SchedConfUpdateInfo
       mutationInfo, @Context HttpServletRequest hsr)
       throws AuthorizationException, InterruptedException {
-    init();
 
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
-    ApplicationACLsManager aclsManager = rm.getApplicationACLsManager();
-    if (aclsManager.areACLsEnabled()) {
-      if (callerUGI == null || !aclsManager.isAdmin(callerUGI)) {
-        String msg = "Only admins can carry out this operation.";
-        throw new ForbiddenException(msg);
-      }
-    }
+    initForWritableEndpoints(callerUGI, true);
 
     ResourceScheduler scheduler = rm.getResourceScheduler();
     if (scheduler instanceof MutableConfScheduler && ((MutableConfScheduler)
@@ -2541,7 +2483,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @QueryParam(RMWSConsts.QUEUE_ACL_TYPE)
         @DefaultValue("SUBMIT_APPLICATIONS") String queueAclType,
       @Context HttpServletRequest hsr) throws AuthorizationException {
-    init();
+    initForReadableEndpoints();
 
     // For the user who invokes this REST call, he/she should have admin access
     // to the queue. Otherwise we will reject the call.
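
For context, after this refactoring every writable REST endpoint in RMWebServices follows the same two-line preamble. A minimal sketch of the pattern, using the same imports as RMWebServices (the endpoint name and body here are hypothetical, not part of the patch):

    @POST
    public Response someWritableEndpoint(@Context HttpServletRequest hsr)
        throws AuthorizationException {
      // Resolve the caller first, then let the shared helper enforce
      // authentication, the static-user check and, when requested,
      // the admin-ACL check.
      UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
      initForWritableEndpoints(callerUGI, false);

      // ... endpoint-specific work runs only for authorized callers ...
      return Response.status(Status.OK).build();
    }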

http://git-wip-us.apache.org/repos/asf/hadoop/blob/082bcd4a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 9c4acc2..0702d65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -684,7 +684,7 @@ public class TestRMWebServices extends JerseyTestBase {
 
     ResourceManager mockRM = mock(ResourceManager.class);
     Configuration conf = new YarnConfiguration();
-    HttpServletRequest mockHsr = mock(HttpServletRequest.class);
+    HttpServletRequest mockHsr = mockHttpServletRequestByUserName("non-admin");
     ApplicationACLsManager aclsManager = new ApplicationACLsManager(conf);
     when(mockRM.getApplicationACLsManager()).thenReturn(aclsManager);
     RMWebServices webSvc =
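
The mockHttpServletRequestByUserName helper referenced above is defined elsewhere in TestRMWebServices; a minimal Mockito-based sketch of what such a helper can look like (assumed implementation, the real one may differ; imports as in the test class):

    private HttpServletRequest mockHttpServletRequestByUserName(String user) {
      // Return a request whose remote user and principal both report the
      // given user name, so the ACL checks see a concrete caller.
      Principal principal = mock(Principal.class);
      when(principal.getName()).thenReturn(user);
      HttpServletRequest mockHsr = mock(HttpServletRequest.class);
      when(mockHsr.getRemoteUser()).thenReturn(user);
      when(mockHsr.getUserPrincipal()).thenReturn(principal);
      return mockHsr;
    }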

http://git-wip-us.apache.org/repos/asf/hadoop/blob/082bcd4a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesHttpStaticUserPermissions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesHttpStaticUserPermissions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesHttpStaticUserPermissions.java
index 60c6f5e..cef32f4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesHttpStaticUserPermissions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesHttpStaticUserPermissions.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
+import org.codehaus.jettison.json.JSONObject;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -179,15 +180,20 @@ public class TestRMWebServicesHttpStaticUserPermissions {
         assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
         InputStream errorStream = conn.getErrorStream();
         String error = "";
-        BufferedReader reader =
-            new BufferedReader(new InputStreamReader(errorStream, "UTF8"));
+        BufferedReader reader = new BufferedReader(
+            new InputStreamReader(errorStream, "UTF8"));
         for (String line; (line = reader.readLine()) != null;) {
           error += line;
         }
         reader.close();
         errorStream.close();
+        JSONObject errResponse = new JSONObject(error);
+        JSONObject remoteException = errResponse
+            .getJSONObject("RemoteException");
         assertEquals(
-          "The default static user cannot carry out this operation.", error);
+            "java.lang.Exception: The default static user cannot carry out "
+            + "this operation.",
+            remoteException.getString("message"));
       }
       conn.disconnect();
     }




[05/50] [abbrv] hadoop git commit: YARN-8247 Incorrect HTTP status code returned by ATSv2 for non-whitelisted users. Contributed by Rohith Sharma K S

Posted by xy...@apache.org.
YARN-8247 Incorrect HTTP status code returned by ATSv2 for non-whitelisted users. Contributed by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6d27ce1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6d27ce1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6d27ce1

Branch: refs/heads/HDDS-4
Commit: d6d27ce178573cb157501dcec7b1211e8553dbd6
Parents: 13b2af6
Author: Vrushali C <vr...@apache.org>
Authored: Wed May 9 22:17:48 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:08 2018 -0700

----------------------------------------------------------------------
 ...elineReaderWhitelistAuthorizationFilter.java | 14 ++---
 ...elineReaderWhitelistAuthorizationFilter.java | 58 +++++++++++++-------
 2 files changed, 44 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d27ce1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
index 8093fcf..dbe391c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
@@ -27,15 +27,13 @@ import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletRequest;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
+import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.webapp.ForbiddenException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderWebServicesUtils;
@@ -64,9 +62,12 @@ public class TimelineReaderWhitelistAuthorizationFilter implements Filter {
   @Override
   public void doFilter(ServletRequest request, ServletResponse response,
       FilterChain chain) throws IOException, ServletException {
+    HttpServletRequest httpRequest = (HttpServletRequest) request;
+    HttpServletResponse httpResponse = (HttpServletResponse) response;
+
     if (isWhitelistReadAuthEnabled) {
       UserGroupInformation callerUGI = TimelineReaderWebServicesUtils
-          .getCallerUserGroupInformation((HttpServletRequest) request, true);
+          .getCallerUserGroupInformation(httpRequest, true);
       if (callerUGI == null) {
         String msg = "Unable to obtain user name, user not authenticated";
         throw new AuthorizationException(msg);
@@ -76,9 +77,8 @@ public class TimelineReaderWhitelistAuthorizationFilter implements Filter {
         String userName = callerUGI.getShortUserName();
         String msg = "User " + userName
             + " is not allowed to read TimelineService V2 data.";
-        Response.status(Status.FORBIDDEN).entity(msg).build();
-        throw new ForbiddenException("user " + userName
-            + " is not allowed to read TimelineService V2 data");
+        httpResponse.sendError(HttpServletResponse.SC_FORBIDDEN, msg);
+        return;
       }
     }
     if (chain != null) {
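
The fix above swaps an uncaught ForbiddenException, which did not translate into the intended HTTP status, for an explicit HttpServletResponse.sendError, so non-whitelisted users now receive a proper 403. The general technique, as a standalone sketch using the javax.servlet types already imported above (allowed() is a hypothetical placeholder for the whitelist check):

    public void doFilter(ServletRequest request, ServletResponse response,
        FilterChain chain) throws IOException, ServletException {
      HttpServletResponse httpResponse = (HttpServletResponse) response;
      if (!allowed(request)) {
        // Write a real 403 status plus message and stop the chain,
        // instead of throwing and letting the container map the error.
        httpResponse.sendError(HttpServletResponse.SC_FORBIDDEN,
            "User is not allowed to read TimelineService V2 data.");
        return;
      }
      chain.doFilter(request, response);
    }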

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d27ce1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWhitelistAuthorizationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWhitelistAuthorizationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWhitelistAuthorizationFilter.java
index bd4f0c2..6f0a83d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWhitelistAuthorizationFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWhitelistAuthorizationFilter.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.reader;
 
+import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -32,13 +33,12 @@ import java.util.Map;
 import javax.servlet.FilterConfig;
 import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
-import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timelineservice.reader.security.TimelineReaderWhitelistAuthorizationFilter;
-import org.apache.hadoop.yarn.webapp.ForbiddenException;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -100,11 +100,11 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
       }
     });
 
-    ServletResponse r = Mockito.mock(ServletResponse.class);
+    HttpServletResponse r = Mockito.mock(HttpServletResponse.class);
     f.doFilter(mockHsr, r, null);
   }
 
-  @Test(expected = ForbiddenException.class)
+  @Test
   public void checkFilterNotAllowedUser() throws ServletException, IOException {
     Map<String, String> map = new HashMap<String, String>();
     map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
@@ -115,14 +115,20 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
     FilterConfig fc = new DummyFilterConfig(map);
     f.init(fc);
     HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    String userName = "testuser1";
     Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
       @Override
       public String getName() {
-        return "testuser1";
+        return userName;
       }
     });
-    ServletResponse r = Mockito.mock(ServletResponse.class);
+    HttpServletResponse r = Mockito.mock(HttpServletResponse.class);
     f.doFilter(mockHsr, r, null);
+
+    String msg = "User " + userName
+        + " is not allowed to read TimelineService V2 data.";
+    Mockito.verify(r)
+        .sendError(eq(HttpServletResponse.SC_FORBIDDEN), eq(msg));
   }
 
   @Test
@@ -143,7 +149,7 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
         return "user1";
       }
     });
-    ServletResponse r = Mockito.mock(ServletResponse.class);
+    HttpServletResponse r = Mockito.mock(HttpServletResponse.class);
     UserGroupInformation user1 =
         UserGroupInformation.createUserForTesting("user1", GROUP_NAMES);
     user1.doAs(new PrivilegedExceptionAction<Object>() {
@@ -155,7 +161,7 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
     });
   }
 
-  @Test(expected = ForbiddenException.class)
+  @Test
   public void checkFilterNotAlloweGroup()
       throws ServletException, IOException, InterruptedException {
     Map<String, String> map = new HashMap<String, String>();
@@ -167,15 +173,16 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
     FilterConfig fc = new DummyFilterConfig(map);
     f.init(fc);
     HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    String userName = "user200";
     Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
       @Override
       public String getName() {
-        return "user200";
+        return userName;
       }
     });
-    ServletResponse r = Mockito.mock(ServletResponse.class);
+    HttpServletResponse r = Mockito.mock(HttpServletResponse.class);
     UserGroupInformation user1 =
-        UserGroupInformation.createUserForTesting("user200", GROUP_NAMES);
+        UserGroupInformation.createUserForTesting(userName, GROUP_NAMES);
     user1.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws Exception {
@@ -183,6 +190,10 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
         return null;
       }
     });
+    String msg = "User " + userName
+        + " is not allowed to read TimelineService V2 data.";
+    Mockito.verify(r)
+        .sendError(eq(HttpServletResponse.SC_FORBIDDEN), eq(msg));
   }
 
   @Test
@@ -205,7 +216,7 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
         return "user90";
       }
     });
-    ServletResponse r = Mockito.mock(ServletResponse.class);
+    HttpServletResponse r = Mockito.mock(HttpServletResponse.class);
     UserGroupInformation user1 =
         UserGroupInformation.createUserForTesting("user90", GROUP_NAMES);
     user1.doAs(new PrivilegedExceptionAction<Object>() {
@@ -235,7 +246,7 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
         return "user90";
       }
     });
-    ServletResponse r = Mockito.mock(ServletResponse.class);
+    HttpServletResponse r = Mockito.mock(HttpServletResponse.class);
     UserGroupInformation user1 =
         UserGroupInformation.createUserForTesting("user90", GROUP_NAMES);
     user1.doAs(new PrivilegedExceptionAction<Object>() {
@@ -247,7 +258,7 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
     });
   }
 
-  @Test(expected = ForbiddenException.class)
+  @Test
   public void checkFilterAllowNoOneWhenAdminAclsEmptyAndUserAclsEmpty()
       throws ServletException, IOException, InterruptedException {
     // check that users in admin acl list are allowed to read
@@ -258,15 +269,16 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
     FilterConfig fc = new DummyFilterConfig(map);
     f.init(fc);
     HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    String userName = "user88";
     Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
       @Override
       public String getName() {
-        return "user88";
+        return userName;
       }
     });
-    ServletResponse r = Mockito.mock(ServletResponse.class);
+    HttpServletResponse r = Mockito.mock(HttpServletResponse.class);
     UserGroupInformation user1 =
-        UserGroupInformation.createUserForTesting("user88", GROUP_NAMES);
+        UserGroupInformation.createUserForTesting(userName, GROUP_NAMES);
     user1.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws Exception {
@@ -274,6 +286,10 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
         return null;
       }
     });
+    String msg = "User " + userName
+        + " is not allowed to read TimelineService V2 data.";
+    Mockito.verify(r)
+        .sendError(eq(HttpServletResponse.SC_FORBIDDEN), eq(msg));
   }
 
   @Test
@@ -293,7 +309,7 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
         return "user437";
       }
     });
-    ServletResponse r = Mockito.mock(ServletResponse.class);
+    HttpServletResponse r = Mockito.mock(HttpServletResponse.class);
     UserGroupInformation user1 =
         UserGroupInformation.createUserForTesting("user437", GROUP_NAMES);
     user1.doAs(new PrivilegedExceptionAction<Object>() {
@@ -327,7 +343,7 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
       }
     });
 
-    ServletResponse r = Mockito.mock(ServletResponse.class);
+    HttpServletResponse r = Mockito.mock(HttpServletResponse.class);
     UserGroupInformation user1 =
         // both username and group name are not part of admin and
         // read allowed users
@@ -348,7 +364,7 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
         return "user27";
       }
     });
-    ServletResponse r2 = Mockito.mock(ServletResponse.class);
+    HttpServletResponse r2 = Mockito.mock(HttpServletResponse.class);
     UserGroupInformation user2 =
         UserGroupInformation.createUserForTesting("user27", GROUP_NAMES);
     user2.doAs(new PrivilegedExceptionAction<Object>() {
@@ -366,7 +382,7 @@ public class TestTimelineReaderWhitelistAuthorizationFilter {
         return "user2";
       }
     });
-    ServletResponse r3 = Mockito.mock(ServletResponse.class);
+    HttpServletResponse r3 = Mockito.mock(HttpServletResponse.class);
     UserGroupInformation user3 =
         UserGroupInformation.createUserForTesting("user2", GROUP_NAMES);
     user3.doAs(new PrivilegedExceptionAction<Object>() {




[20/50] [abbrv] hadoop git commit: HDFS-13346. RBF: Fix synchronization of router quota and nameservice quota.

Posted by xy...@apache.org.
HDFS-13346. RBF: Fix synchronization of router quota and nameservice quota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3077c41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3077c41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3077c41

Branch: refs/heads/HDDS-4
Commit: c3077c41b890690a0faa8d08745d1f07a69d5dc0
Parents: 9db20b3
Author: Yiqun Lin <yq...@apache.org>
Authored: Fri May 11 14:51:30 2018 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../federation/router/RouterAdminServer.java    | 28 ++++++++++++-
 .../federation/router/TestRouterAdminCLI.java   | 10 +++++
 .../federation/router/TestRouterQuota.java      | 43 +++++++++++++++++++-
 3 files changed, 79 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3077c41/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
index 3da9a5a..139dfb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
@@ -26,6 +26,7 @@ import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolServerSideTranslatorPB;
@@ -54,6 +55,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableE
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
@@ -228,7 +230,31 @@ public class RouterAdminServer extends AbstractService
   @Override
   public UpdateMountTableEntryResponse updateMountTableEntry(
       UpdateMountTableEntryRequest request) throws IOException {
-    return getMountTableStore().updateMountTableEntry(request);
+    UpdateMountTableEntryResponse response =
+        getMountTableStore().updateMountTableEntry(request);
+
+    MountTable mountTable = request.getEntry();
+    if (mountTable != null) {
+      synchronizeQuota(mountTable);
+    }
+    return response;
+  }
+
+  /**
+   * Synchronize the quota value across the mount table and subclusters.
+   * @param mountTable Mount table entry whose quota should be synchronized.
+   * @throws IOException
+   */
+  private void synchronizeQuota(MountTable mountTable) throws IOException {
+    String path = mountTable.getSourcePath();
+    long nsQuota = mountTable.getQuota().getQuota();
+    long ssQuota = mountTable.getQuota().getSpaceQuota();
+
+    if (nsQuota != HdfsConstants.QUOTA_DONT_SET
+        || ssQuota != HdfsConstants.QUOTA_DONT_SET) {
+      this.router.getRpcServer().getQuotaModule().setQuota(path, nsQuota,
+          ssQuota, null);
+    }
   }
 
   @Override
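
With synchronizeQuota in place, updating a mount table entry also pushes the new quota to the destination nameservice. A usage sketch of the admin-side flow, mirroring the test added below (client setup omitted; classes as imported in TestRouterQuota):

    // Update the quota on an existing mount entry.
    MountTable mountTable = MountTable.newInstance("/quotaSync",
        Collections.singletonMap("ns0", "/"), Time.now(), Time.now());
    mountTable.setQuota(new RouterQuotaUsage.Builder()
        .quota(3)          // namespace quota
        .spaceQuota(4)     // storage-space quota
        .build());
    UpdateMountTableEntryRequest updateRequest =
        UpdateMountTableEntryRequest.newInstance(mountTable);
    // updateMountTableEntry() now also invokes synchronizeQuota(), which
    // calls setQuota() on the subcluster unless both values are
    // HdfsConstants.QUOTA_DONT_SET.
    mountTableManager.updateMountTableEntry(updateRequest);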

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3077c41/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index 2537c19..7e04e61 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
@@ -51,6 +51,8 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
 
 import com.google.common.base.Supplier;
 
@@ -104,6 +106,14 @@ public class TestRouterAdminCLI {
     membership.registerNamenode(
         createNamenodeReport("ns1", "nn1", HAServiceState.ACTIVE));
     stateStore.refreshCaches(true);
+
+    // Mock the quota module since no real namenode is started up.
+    Quota quota = Mockito
+        .spy(routerContext.getRouter().createRpcServer().getQuotaModule());
+    Mockito.doNothing().when(quota).setQuota(Mockito.anyString(),
+        Mockito.anyLong(), Mockito.anyLong(), Mockito.any());
+    Whitebox.setInternalState(
+        routerContext.getRouter().getRpcServer(), "quotaCall", quota);
   }
 
   @AfterClass

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3077c41/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
index 0e62200..c331c6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.federation.router;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
 
 import java.io.IOException;
@@ -37,9 +38,9 @@ import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
@@ -49,8 +50,10 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntr
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -452,4 +455,42 @@ public class TestRouterQuota {
 
     return removeResponse.getEntries();
   }
+
+  @Test
+  public void testQuotaSynchronization() throws IOException {
+    long updateNsQuota = 3;
+    long updateSsQuota = 4;
+    MountTable mountTable = MountTable.newInstance("/quotaSync",
+        Collections.singletonMap("ns0", "/"), Time.now(), Time.now());
+    mountTable.setQuota(new RouterQuotaUsage.Builder().quota(1)
+        .spaceQuota(2).build());
+    // Add new mount table
+    addMountTable(mountTable);
+
+    // Ensure the quota has not already been set to the updated values.
+    QuotaUsage realQuota = nnContext1.getFileSystem()
+        .getQuotaUsage(new Path("/"));
+    assertNotEquals(updateNsQuota, realQuota.getQuota());
+    assertNotEquals(updateSsQuota, realQuota.getSpaceQuota());
+
+    // Call periodicInvoke to ensure the quota is updated in the quota
+    // manager and the state store.
+    RouterQuotaUpdateService updateService = routerContext.getRouter()
+        .getQuotaCacheUpdateService();
+    updateService.periodicInvoke();
+
+    mountTable.setQuota(new RouterQuotaUsage.Builder().quota(updateNsQuota)
+        .spaceQuota(updateSsQuota).build());
+    UpdateMountTableEntryRequest updateRequest = UpdateMountTableEntryRequest
+        .newInstance(mountTable);
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTableManager = client.getMountTableManager();
+    mountTableManager.updateMountTableEntry(updateRequest);
+
+    // Verify that the quota is updated on the real path.
+    realQuota = nnContext1.getFileSystem().getQuotaUsage(
+        new Path("/"));
+    assertEquals(updateNsQuota, realQuota.getQuota());
+    assertEquals(updateSsQuota, realQuota.getSpaceQuota());
+  }
 }




[39/50] [abbrv] hadoop git commit: YARN-8244. TestContainerSchedulerQueuing.testStartMultipleContainers failed. Contributed by Jim Brennan

Posted by xy...@apache.org.
YARN-8244. TestContainerSchedulerQueuing.testStartMultipleContainers failed. Contributed by Jim Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffe99d40
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffe99d40
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffe99d40

Branch: refs/heads/HDDS-4
Commit: ffe99d4053927e8a6c64a8f94d54d780587ae6de
Parents: a4ef351
Author: Jason Lowe <jl...@apache.org>
Authored: Fri May 11 14:07:32 2018 -0500
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../containermanager/TestContainerManager.java  |  20 ++--
 .../TestContainerSchedulerQueuing.java          | 100 +++++++------------
 2 files changed, 42 insertions(+), 78 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffe99d40/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
index 6d198a4..ee5259f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -1486,8 +1486,6 @@ public class TestContainerManager extends BaseContainerManagerTest {
     containerManager.start();
 
     List<StartContainerRequest> list = new ArrayList<>();
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
     for (int i = 0; i < 10; i++) {
       ContainerId cId = createContainerId(i);
       long identifier = 0;
@@ -1500,8 +1498,9 @@ public class TestContainerManager extends BaseContainerManagerTest {
           createContainerToken(cId, identifier, context.getNodeId(), user,
             context.getContainerTokenSecretManager());
       StartContainerRequest request =
-          StartContainerRequest.newInstance(containerLaunchContext,
-            containerToken);
+          StartContainerRequest.newInstance(
+              recordFactory.newRecordInstance(ContainerLaunchContext.class),
+              containerToken);
       list.add(request);
     }
     StartContainersRequest requestList =
@@ -1531,9 +1530,6 @@ public class TestContainerManager extends BaseContainerManagerTest {
   public void testMultipleContainersStopAndGetStatus() throws Exception {
     containerManager.start();
     List<StartContainerRequest> startRequest = new ArrayList<>();
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
     List<ContainerId> containerIds = new ArrayList<>();
     for (int i = 0; i < 10; i++) {
       ContainerId cId;
@@ -1547,8 +1543,9 @@ public class TestContainerManager extends BaseContainerManagerTest {
           createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(),
             user, context.getContainerTokenSecretManager());
       StartContainerRequest request =
-          StartContainerRequest.newInstance(containerLaunchContext,
-            containerToken);
+          StartContainerRequest.newInstance(
+              recordFactory.newRecordInstance(ContainerLaunchContext.class),
+              containerToken);
       startRequest.add(request);
       containerIds.add(cId);
     }
@@ -1788,15 +1785,14 @@ public class TestContainerManager extends BaseContainerManagerTest {
     containerManager.start();
     // Start 4 containers 0..4 with default resource (1024, 1)
     List<StartContainerRequest> list = new ArrayList<>();
-    ContainerLaunchContext containerLaunchContext = recordFactory
-        .newRecordInstance(ContainerLaunchContext.class);
     for (int i = 0; i < 4; i++) {
       ContainerId cId = createContainerId(i);
       long identifier = DUMMY_RM_IDENTIFIER;
       Token containerToken = createContainerToken(cId, identifier,
           context.getNodeId(), user, context.getContainerTokenSecretManager());
       StartContainerRequest request = StartContainerRequest.newInstance(
-          containerLaunchContext, containerToken);
+          recordFactory.newRecordInstance(ContainerLaunchContext.class),
+          containerToken);
       list.add(request);
     }
     StartContainersRequest requestList = StartContainersRequest

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffe99d40/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
index 1da7e4a..70066c6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
@@ -229,19 +229,16 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
   public void testStartMultipleContainers() throws Exception {
     containerManager.start();
 
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
     List<StartContainerRequest> list = new ArrayList<>();
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(1024, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.GUARANTEED)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(1024, 1),
@@ -285,19 +282,16 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
   public void testQueueMultipleContainers() throws Exception {
     containerManager.start();
 
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
     List<StartContainerRequest> list = new ArrayList<>();
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(3072, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.GUARANTEED)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(3072, 1),
@@ -343,26 +337,23 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
   public void testStartAndQueueMultipleContainers() throws Exception {
     containerManager.start();
 
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
     List<StartContainerRequest> list = new ArrayList<>();
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(2048, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.OPPORTUNISTIC)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(1024, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.OPPORTUNISTIC)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(1024, 1),
@@ -415,12 +406,9 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
   public void testStartOpportunistcsWhenOppQueueIsFull() throws Exception {
     containerManager.start();
 
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
     List<StartContainerRequest> list = new ArrayList<>();
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(2048, 1),
@@ -432,7 +420,7 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
         YarnConfiguration.DEFAULT_NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH);
     for (int i = 1; i < maxOppQueueLength + 2; i++) {
       list.add(StartContainerRequest.newInstance(
-          containerLaunchContext,
+          recordFactory.newRecordInstance(ContainerLaunchContext.class),
           createContainerToken(createContainerId(i), DUMMY_RM_IDENTIFIER,
               context.getNodeId(),
               user, BuilderUtils.newResource(2048, 1),
@@ -500,26 +488,23 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
   public void testKillOpportunisticForGuaranteedContainer() throws Exception {
     containerManager.start();
 
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
     List<StartContainerRequest> list = new ArrayList<>();
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(2048, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.OPPORTUNISTIC)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(2048, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.OPPORTUNISTIC)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(2048, 1),
@@ -589,12 +574,10 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
     Listener listener = new Listener();
     ((NodeManager.DefaultContainerStateListener)containerManager.getContext().
         getContainerStateTransitionListener()).addListener(listener);
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
 
     List<StartContainerRequest> list = new ArrayList<>();
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(2048, 1),
@@ -610,7 +593,7 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
 
     list = new ArrayList<>();
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(2048, 1),
@@ -718,42 +701,42 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
 
     list = new ArrayList<>();
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(512, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.OPPORTUNISTIC)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(512, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.OPPORTUNISTIC)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(3), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(512, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.OPPORTUNISTIC)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(4), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(512, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.OPPORTUNISTIC)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(5), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(512, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.OPPORTUNISTIC)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(6), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(512, 1),
@@ -840,14 +823,14 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
 
     list = new ArrayList<>();
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(512, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.OPPORTUNISTIC)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(512, 1),
@@ -887,26 +870,23 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
   public void testKillMultipleOpportunisticContainers() throws Exception {
     containerManager.start();
 
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
     List<StartContainerRequest> list = new ArrayList<>();
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(512, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.OPPORTUNISTIC)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(512, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.OPPORTUNISTIC)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(512, 1),
@@ -919,7 +899,7 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
 
     list = new ArrayList<>();
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(3), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(1500, 1),
@@ -967,14 +947,11 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
   public void testKillOnlyRequiredOpportunisticContainers() throws Exception {
     containerManager.start();
 
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
     List<StartContainerRequest> list = new ArrayList<>();
     // Fill NM with Opportunistic containers
     for (int i = 0; i < 4; i++) {
       list.add(StartContainerRequest.newInstance(
-          containerLaunchContext,
+          recordFactory.newRecordInstance(ContainerLaunchContext.class),
           createContainerToken(createContainerId(i), DUMMY_RM_IDENTIFIER,
               context.getNodeId(),
               user, BuilderUtils.newResource(512, 1),
@@ -990,7 +967,7 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
     // Now ask for two Guaranteed containers
     for (int i = 4; i < 6; i++) {
       list.add(StartContainerRequest.newInstance(
-          containerLaunchContext,
+          recordFactory.newRecordInstance(ContainerLaunchContext.class),
           createContainerToken(createContainerId(i), DUMMY_RM_IDENTIFIER,
               context.getNodeId(),
               user, BuilderUtils.newResource(512, 1),
@@ -1036,26 +1013,23 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
   public void testStopQueuedContainer() throws Exception {
     containerManager.start();
 
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
     List<StartContainerRequest> list = new ArrayList<>();
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(2048, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.GUARANTEED)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(512, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.OPPORTUNISTIC)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(512, 1),
@@ -1142,19 +1116,16 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
     ((NodeManager.DefaultContainerStateListener)containerManager.getContext().
         getContainerStateTransitionListener()).addListener(listener);
 
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
     List<StartContainerRequest> list = new ArrayList<>();
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(2048, 1),
             context.getContainerTokenSecretManager(), null,
             ExecutionType.OPPORTUNISTIC)));
     list.add(StartContainerRequest.newInstance(
-        containerLaunchContext,
+        recordFactory.newRecordInstance(ContainerLaunchContext.class),
         createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
             context.getNodeId(),
             user, BuilderUtils.newResource(1024, 1),
@@ -1265,12 +1236,9 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
     containerManager.start();
     // Construct the Container-id
     ContainerId cId = createContainerId(0);
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
     StartContainerRequest scRequest =
         StartContainerRequest.newInstance(
-            containerLaunchContext,
+            recordFactory.newRecordInstance(ContainerLaunchContext.class),
             createContainerToken(cId, DUMMY_RM_IDENTIFIER,
                 context.getNodeId(), user, BuilderUtils.newResource(512, 1),
                 context.getContainerTokenSecretManager(), null));




[06/50] [abbrv] hadoop git commit: YARN-8201. Skip stacktrace of few exception from ClientRMService. Contributed by Bilwa S T.

Posted by xy...@apache.org.
YARN-8201. Skip stacktrace of few exception from ClientRMService. Contributed by Bilwa S T.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13b2af65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13b2af65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13b2af65

Branch: refs/heads/HDDS-4
Commit: 13b2af65107160446c71412421bf0cf2df742ba4
Parents: ea1867e
Author: bibinchundatt <bi...@apache.org>
Authored: Thu May 10 09:15:46 2018 +0530
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:08 2018 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/server/resourcemanager/ClientRMService.java      | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b2af65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 55a3f0b..feaa5cb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -265,6 +265,10 @@ public class ClientRMService extends AbstractService implements
             conf.getInt(YarnConfiguration.RM_CLIENT_THREAD_COUNT, 
                 YarnConfiguration.DEFAULT_RM_CLIENT_THREAD_COUNT));
     
+    this.server.addTerseExceptions(ApplicationNotFoundException.class,
+        ApplicationAttemptNotFoundException.class,
+        ContainerNotFoundException.class);
+
     // Enable service authorization?
     if (conf.getBoolean(
         CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
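
For readers unfamiliar with terse exceptions: once an exception class is
registered via addTerseExceptions(), the IPC server logs only the exception's
summary line instead of a full stack trace when a handler throws it, which
keeps expected "not found" conditions from flooding the RM log. Below is a
minimal standalone sketch of the mechanism (assumptions: hadoop-common on the
classpath; the PingProtocol interface is hypothetical, defined only for this
demo and not part of Hadoop or of this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.ipc.Server;

    public class TerseExceptionDemo {
      /** Toy protocol for the sketch; versionID is required by the RPC layer. */
      public interface PingProtocol {
        long versionID = 1L;
        String ping();
      }

      public static void main(String[] args) throws Exception {
        Server server = new RPC.Builder(new Configuration())
            .setProtocol(PingProtocol.class)
            .setInstance((PingProtocol) () -> {
              throw new IllegalArgumentException("expected condition");
            })
            .setBindAddress("0.0.0.0")
            .setPort(0)
            .build();
        // Handlers that throw IllegalArgumentException now log a one-line
        // summary rather than a stack trace, mirroring what the patch does
        // for the application/attempt/container "not found" exceptions.
        server.addTerseExceptions(IllegalArgumentException.class);
        server.start();
        server.stop();
      }
    }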




[48/50] [abbrv] hadoop git commit: HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.

Posted by xy...@apache.org.
HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9c79f77
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9c79f77
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9c79f77

Branch: refs/heads/HDDS-4
Commit: c9c79f775ad99469439bae2b8346e615e2870247
Parents: 5f821a7
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed May 9 15:56:03 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:10 2018 -0700

----------------------------------------------------------------------
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 12 ------------
 1 file changed, 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9c79f77/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index deb286d..6998a85 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -120,18 +120,6 @@
     </description>
   </property>
   <property>
-    <name>dfs.ratis.client.request.timeout.duration</name>
-    <value>3s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for ratis client request.</description>
-  </property>
-  <property>
-    <name>dfs.ratis.server.request.timeout.duration</name>
-    <value>3s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for ratis server request.</description>
-  </property>
-  <property>
     <name>ozone.container.report.interval</name>
     <value>60000ms</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>




[24/50] [abbrv] hadoop git commit: HDFS-13542. TestBlockManager#testNeededReplicationWhileAppending fails due to improper cluster shutdown in TestBlockManager#testBlockManagerMachinesArray on Windows. Contributed by Anbang Hu.

Posted by xy...@apache.org.
HDFS-13542. TestBlockManager#testNeededReplicationWhileAppending fails due to improper cluster shutdown in TestBlockManager#testBlockManagerMachinesArray on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f809c2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f809c2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f809c2b

Branch: refs/heads/HDDS-4
Commit: 6f809c2b94e24645b8410de221493d2a869984d9
Parents: c3077c4
Author: Inigo Goiri <in...@apache.org>
Authored: Fri May 11 09:47:57 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../blockmanagement/TestBlockManager.java       | 155 ++++++++++---------
 1 file changed, 85 insertions(+), 70 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f809c2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 5219a44..58ca2e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -452,8 +452,8 @@ public class TestBlockManager {
     String src = "/test-file";
     Path file = new Path(src);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-    cluster.waitActive();
     try {
+      cluster.waitActive();
       BlockManager bm = cluster.getNamesystem().getBlockManager();
       FileSystem fs = cluster.getFileSystem();
       NamenodeProtocols namenode = cluster.getNameNodeRpc();
@@ -492,7 +492,9 @@ public class TestBlockManager {
         IOUtils.closeStream(out);
       }
     } finally {
-      cluster.shutdown();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
   
@@ -1043,7 +1045,9 @@ public class TestBlockManager {
       assertTrue(fs.exists(file1));
       fs.delete(file1, true);
       assertTrue(!fs.exists(file1));
-      cluster.shutdown();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
 
@@ -1143,7 +1147,9 @@ public class TestBlockManager {
       assertEquals(0, bm.getBlockOpQueueLength());
       assertTrue(doneLatch.await(1, TimeUnit.SECONDS));
     } finally {
-      cluster.shutdown();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
 
@@ -1218,7 +1224,9 @@ public class TestBlockManager {
       long batched = MetricsAsserts.getLongCounter("BlockOpsBatched", rb);
       assertTrue(batched > 0);
     } finally {
-      cluster.shutdown();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
 
@@ -1227,76 +1235,83 @@ public class TestBlockManager {
     final Configuration conf = new HdfsConfiguration();
     final MiniDFSCluster cluster =
         new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-    cluster.waitActive();
-    BlockManager blockManager = cluster.getNamesystem().getBlockManager();
-    FileSystem fs = cluster.getFileSystem();
-    final Path filePath = new Path("/tmp.txt");
-    final long fileLen = 1L;
-    DFSTestUtil.createFile(fs, filePath, fileLen, (short) 3, 1L);
-    DFSTestUtil.waitForReplication((DistributedFileSystem)fs,
-        filePath, (short) 3, 60000);
-    ArrayList<DataNode> datanodes = cluster.getDataNodes();
-    assertEquals(datanodes.size(), 4);
-    FSNamesystem ns = cluster.getNamesystem();
-    // get the block
-    final String bpid = cluster.getNamesystem().getBlockPoolId();
-    File storageDir = cluster.getInstanceStorageDir(0, 0);
-    File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-    assertTrue("Data directory does not exist", dataDir.exists());
-    BlockInfo blockInfo = blockManager.blocksMap.getBlocks().iterator().next();
-    ExtendedBlock blk = new ExtendedBlock(bpid, blockInfo.getBlockId(),
-        blockInfo.getNumBytes(), blockInfo.getGenerationStamp());
-    DatanodeDescriptor failedStorageDataNode =
-        blockManager.getStoredBlock(blockInfo).getDatanode(0);
-    DatanodeDescriptor corruptStorageDataNode =
-        blockManager.getStoredBlock(blockInfo).getDatanode(1);
-
-    ArrayList<StorageReport> reports = new ArrayList<StorageReport>();
-    for(int i=0; i<failedStorageDataNode.getStorageInfos().length; i++) {
-      DatanodeStorageInfo storageInfo = failedStorageDataNode
-          .getStorageInfos()[i];
-      DatanodeStorage dns = new DatanodeStorage(
-          failedStorageDataNode.getStorageInfos()[i].getStorageID(),
-          DatanodeStorage.State.FAILED,
-          failedStorageDataNode.getStorageInfos()[i].getStorageType());
-      while(storageInfo.getBlockIterator().hasNext()) {
-        BlockInfo blockInfo1 = storageInfo.getBlockIterator().next();
-        if(blockInfo1.equals(blockInfo)) {
-          StorageReport report = new StorageReport(
-              dns, true, storageInfo.getCapacity(),
-              storageInfo.getDfsUsed(), storageInfo.getRemaining(),
-              storageInfo.getBlockPoolUsed(), 0L);
-          reports.add(report);
-          break;
+    try {
+      cluster.waitActive();
+      BlockManager blockManager = cluster.getNamesystem().getBlockManager();
+      FileSystem fs = cluster.getFileSystem();
+      final Path filePath = new Path("/tmp.txt");
+      final long fileLen = 1L;
+      DFSTestUtil.createFile(fs, filePath, fileLen, (short) 3, 1L);
+      DFSTestUtil.waitForReplication((DistributedFileSystem)fs,
+          filePath, (short) 3, 60000);
+      ArrayList<DataNode> datanodes = cluster.getDataNodes();
+      assertEquals(datanodes.size(), 4);
+      FSNamesystem ns = cluster.getNamesystem();
+      // get the block
+      final String bpid = cluster.getNamesystem().getBlockPoolId();
+      File storageDir = cluster.getInstanceStorageDir(0, 0);
+      File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+      assertTrue("Data directory does not exist", dataDir.exists());
+      BlockInfo blockInfo =
+          blockManager.blocksMap.getBlocks().iterator().next();
+      ExtendedBlock blk = new ExtendedBlock(bpid, blockInfo.getBlockId(),
+          blockInfo.getNumBytes(), blockInfo.getGenerationStamp());
+      DatanodeDescriptor failedStorageDataNode =
+          blockManager.getStoredBlock(blockInfo).getDatanode(0);
+      DatanodeDescriptor corruptStorageDataNode =
+          blockManager.getStoredBlock(blockInfo).getDatanode(1);
+
+      ArrayList<StorageReport> reports = new ArrayList<StorageReport>();
+      for(int i=0; i<failedStorageDataNode.getStorageInfos().length; i++) {
+        DatanodeStorageInfo storageInfo = failedStorageDataNode
+            .getStorageInfos()[i];
+        DatanodeStorage dns = new DatanodeStorage(
+            failedStorageDataNode.getStorageInfos()[i].getStorageID(),
+            DatanodeStorage.State.FAILED,
+            failedStorageDataNode.getStorageInfos()[i].getStorageType());
+        while(storageInfo.getBlockIterator().hasNext()) {
+          BlockInfo blockInfo1 = storageInfo.getBlockIterator().next();
+          if(blockInfo1.equals(blockInfo)) {
+            StorageReport report = new StorageReport(
+                dns, true, storageInfo.getCapacity(),
+                storageInfo.getDfsUsed(), storageInfo.getRemaining(),
+                storageInfo.getBlockPoolUsed(), 0L);
+            reports.add(report);
+            break;
+          }
         }
       }
-    }
-    failedStorageDataNode.updateHeartbeat(reports.toArray(StorageReport
-        .EMPTY_ARRAY), 0L, 0L, 0, 0, null);
-    ns.writeLock();
-    DatanodeStorageInfo corruptStorageInfo= null;
-    for(int i=0; i<corruptStorageDataNode.getStorageInfos().length; i++) {
-      corruptStorageInfo = corruptStorageDataNode.getStorageInfos()[i];
-      while(corruptStorageInfo.getBlockIterator().hasNext()) {
-        BlockInfo blockInfo1 = corruptStorageInfo.getBlockIterator().next();
-        if (blockInfo1.equals(blockInfo)) {
-          break;
+      failedStorageDataNode.updateHeartbeat(reports.toArray(StorageReport
+          .EMPTY_ARRAY), 0L, 0L, 0, 0, null);
+      ns.writeLock();
+      DatanodeStorageInfo corruptStorageInfo= null;
+      for(int i=0; i<corruptStorageDataNode.getStorageInfos().length; i++) {
+        corruptStorageInfo = corruptStorageDataNode.getStorageInfos()[i];
+        while(corruptStorageInfo.getBlockIterator().hasNext()) {
+          BlockInfo blockInfo1 = corruptStorageInfo.getBlockIterator().next();
+          if (blockInfo1.equals(blockInfo)) {
+            break;
+          }
         }
       }
+      blockManager.findAndMarkBlockAsCorrupt(blk, corruptStorageDataNode,
+          corruptStorageInfo.getStorageID(),
+          CorruptReplicasMap.Reason.ANY.toString());
+      ns.writeUnlock();
+      BlockInfo[] blockInfos = new BlockInfo[] {blockInfo};
+      ns.readLock();
+      LocatedBlocks locatedBlocks =
+          blockManager.createLocatedBlocks(blockInfos, 3L, false, 0L, 3L,
+              false, false, null, null);
+      assertTrue("Located Blocks should exclude corrupt" +
+              "replicas and failed storages",
+          locatedBlocks.getLocatedBlocks().size() == 1);
+      ns.readUnlock();
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
-    blockManager.findAndMarkBlockAsCorrupt(blk, corruptStorageDataNode,
-        corruptStorageInfo.getStorageID(),
-        CorruptReplicasMap.Reason.ANY.toString());
-    ns.writeUnlock();
-    BlockInfo[] blockInfos = new BlockInfo[] {blockInfo};
-    ns.readLock();
-    LocatedBlocks locatedBlocks =
-        blockManager.createLocatedBlocks(blockInfos, 3L, false, 0L, 3L,
-        false, false, null, null);
-    assertTrue("Located Blocks should exclude corrupt" +
-        "replicas and failed storages",
-        locatedBlocks.getLocatedBlocks().size() == 1);
-    ns.readUnlock();
   }
 
   @Test
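
The shutdown pattern applied throughout this patch is worth isolating: build
the MiniDFSCluster, keep every assertion inside try (including waitActive()),
and guard shutdown with a null check in finally, so an exception anywhere
after construction still shuts the cluster down. On Windows a leaked cluster
typically keeps its storage directories locked, which is what makes the
following tests fail. A minimal sketch, assuming the hadoop-hdfs test
artifact on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class ClusterShutdownPattern {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(1).build();
        try {
          cluster.waitActive();  // inside try: a timeout here must not leak
          // ... test body would go here ...
        } finally {
          if (cluster != null) {  // defensive null check, matching the patch
            cluster.shutdown();
          }
        }
      }
    }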




[45/50] [abbrv] hadoop git commit: Add 2.9.1 release notes and changes documents

Posted by xy...@apache.org.
Add 2.9.1 release notes and changes documents


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7192749c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7192749c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7192749c

Branch: refs/heads/HDDS-4
Commit: 7192749c8b5d95313e8575c6875846c698632673
Parents: 187a00f
Author: Sammi Chen <sa...@intel.com>
Authored: Mon May 14 15:14:02 2018 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../markdown/release/2.9.1/CHANGES.2.9.1.md     | 277 ++++++++++++++++
 .../release/2.9.1/RELEASENOTES.2.9.1.md         |  88 ++++++
 .../jdiff/Apache_Hadoop_HDFS_2.9.1.xml          | 312 +++++++++++++++++++
 hadoop-project-dist/pom.xml                     |   2 +-
 4 files changed, 678 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7192749c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
new file mode 100644
index 0000000..c5e53f6
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
@@ -0,0 +1,277 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# "Apache Hadoop" Changelog
+
+## Release 2.9.1 - 2018-04-16
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | RBF: Document Router and State Store metrics |  Major | documentation | Yiqun Lin | Yiqun Lin |
+| [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | RBF: Add ACL support for mount table |  Major | . | Yiqun Lin | Yiqun Lin |
+| [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath |  Major | timelineclient, timelinereader, timelineserver | Vrushali C | Varun Saxena |
+| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store |  Minor | documentation | Yiqun Lin | Yiqun Lin |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix doc error setting up client |  Major | federation | tartarus | tartarus |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-12756](https://issues.apache.org/jira/browse/HADOOP-12756) | Incorporate Aliyun OSS file system implementation |  Major | fs, fs/oss | shimingfei | mingfei.shi |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14872](https://issues.apache.org/jira/browse/HADOOP-14872) | CryptoInputStream should implement unbuffer |  Major | fs, security | John Zhuge | John Zhuge |
+| [HADOOP-14964](https://issues.apache.org/jira/browse/HADOOP-14964) | AliyunOSS: backport Aliyun OSS module to branch-2 |  Major | fs/oss | Genmao Yu | SammiChen |
+| [YARN-6851](https://issues.apache.org/jira/browse/YARN-6851) | Capacity Scheduler: document configs for controlling # containers allowed to be allocated per node heartbeat |  Minor | . | Wei Yan | Wei Yan |
+| [YARN-7495](https://issues.apache.org/jira/browse/YARN-7495) | Improve robustness of the AggregatedLogDeletionService |  Major | log-aggregation | Jonathan Eagles | Jonathan Eagles |
+| [YARN-7611](https://issues.apache.org/jira/browse/YARN-7611) | Node manager web UI should display container type in containers page |  Major | nodemanager, webapp | Weiwei Yang | Weiwei Yang |
+| [HADOOP-15056](https://issues.apache.org/jira/browse/HADOOP-15056) | Fix TestUnbuffer#testUnbufferException failure |  Minor | test | Jack Bearden | Jack Bearden |
+| [HADOOP-15012](https://issues.apache.org/jira/browse/HADOOP-15012) | Add readahead, dropbehind, and unbuffer to StreamCapabilities |  Major | fs | John Zhuge | John Zhuge |
+| [HADOOP-15104](https://issues.apache.org/jira/browse/HADOOP-15104) | AliyunOSS: change the default value of max error retry |  Major | fs/oss | wujinhu | wujinhu |
+| [YARN-7642](https://issues.apache.org/jira/browse/YARN-7642) | Add test case to verify context update after container promotion or demotion with or without auto update |  Minor | nodemanager | Weiwei Yang | Weiwei Yang |
+| [HADOOP-15111](https://issues.apache.org/jira/browse/HADOOP-15111) | AliyunOSS: backport HADOOP-14993 to branch-2 |  Major | fs/oss | Genmao Yu | Genmao Yu |
+| [HDFS-9023](https://issues.apache.org/jira/browse/HDFS-9023) | When NN is not able to identify DN for replication, reason behind it can be logged |  Critical | hdfs-client, namenode | Surendra Singh Lilhore | Xiao Chen |
+| [YARN-7678](https://issues.apache.org/jira/browse/YARN-7678) | Ability to enable logging of container memory stats |  Major | nodemanager | Jim Brennan | Jim Brennan |
+| [HDFS-12945](https://issues.apache.org/jira/browse/HDFS-12945) | Switch to ClientProtocol instead of NamenodeProtocols in NamenodeWebHdfsMethods |  Minor | . | Wei Yan | Wei Yan |
+| [YARN-7590](https://issues.apache.org/jira/browse/YARN-7590) | Improve container-executor validation check |  Major | security, yarn | Eric Yang | Eric Yang |
+| [HADOOP-15189](https://issues.apache.org/jira/browse/HADOOP-15189) | backport HADOOP-15039 to branch-2 and branch-3 |  Blocker | . | Genmao Yu | Genmao Yu |
+| [HADOOP-15212](https://issues.apache.org/jira/browse/HADOOP-15212) | Add independent secret manager method for logging expired tokens |  Major | security | Daryn Sharp | Daryn Sharp |
+| [YARN-7728](https://issues.apache.org/jira/browse/YARN-7728) | Expose container preemptions related information in Capacity Scheduler queue metrics |  Major | . | Eric Payne | Eric Payne |
+| [MAPREDUCE-7048](https://issues.apache.org/jira/browse/MAPREDUCE-7048) | Uber AM can crash due to unknown task in statusUpdate |  Major | mr-am | Peter Bacsko | Peter Bacsko |
+| [HADOOP-13972](https://issues.apache.org/jira/browse/HADOOP-13972) | ADLS to support per-store configuration |  Major | fs/adl | John Zhuge | Sharad Sonker |
+| [YARN-7813](https://issues.apache.org/jira/browse/YARN-7813) | Capacity Scheduler Intra-queue Preemption should be configurable for each queue |  Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
+| [HDFS-11187](https://issues.apache.org/jira/browse/HDFS-11187) | Optimize disk access for last partial chunk checksum of Finalized replica |  Major | datanode | Wei-Chiu Chuang | Gabor Bota |
+| [HADOOP-15279](https://issues.apache.org/jira/browse/HADOOP-15279) | increase maven heap size recommendations |  Minor | build, documentation, test | Allen Wittenauer | Allen Wittenauer |
+| [HDFS-12884](https://issues.apache.org/jira/browse/HDFS-12884) | BlockUnderConstructionFeature.truncateBlock should be of type BlockInfo |  Major | namenode | Konstantin Shvachko | chencan |
+| [HADOOP-15334](https://issues.apache.org/jira/browse/HADOOP-15334) | Upgrade Maven surefire plugin |  Major | build | Arpit Agarwal | Arpit Agarwal |
+| [YARN-7623](https://issues.apache.org/jira/browse/YARN-7623) | Fix the CapacityScheduler Queue configuration documentation |  Major | . | Arun Suresh | Jonathan Hung |
+| [HDFS-13314](https://issues.apache.org/jira/browse/HDFS-13314) | NameNode should optionally exit if it detects FsImage corruption |  Major | namenode | Arpit Agarwal | Arpit Agarwal |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-13723](https://issues.apache.org/jira/browse/HADOOP-13723) | AliyunOSSInputStream#read() should update read bytes stat correctly |  Major | tools | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14045](https://issues.apache.org/jira/browse/HADOOP-14045) | Aliyun OSS documentation missing from website |  Major | documentation, fs/oss | Andrew Wang | Yiqun Lin |
+| [HADOOP-14458](https://issues.apache.org/jira/browse/HADOOP-14458) | Add missing imports to TestAliyunOSSFileSystemContract.java |  Trivial | fs/oss, test | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14466](https://issues.apache.org/jira/browse/HADOOP-14466) | Remove useless document from TestAliyunOSSFileSystemContract.java |  Minor | documentation | Akira Ajisaka | Chen Liang |
+| [HDFS-12318](https://issues.apache.org/jira/browse/HDFS-12318) | Fix IOException condition for openInfo in DFSInputStream |  Major | . | legend | legend |
+| [HDFS-12614](https://issues.apache.org/jira/browse/HDFS-12614) | FSPermissionChecker#getINodeAttrs() throws NPE when INodeAttributesProvider configured |  Major | . | Manoj Govindassamy | Manoj Govindassamy |
+| [HDFS-12788](https://issues.apache.org/jira/browse/HDFS-12788) | Reset the upload button when file upload fails |  Critical | ui, webhdfs | Brahma Reddy Battula | Brahma Reddy Battula |
+| [YARN-7388](https://issues.apache.org/jira/browse/YARN-7388) | TestAMRestart should be scheduler agnostic |  Major | . | Haibo Chen | Haibo Chen |
+| [HDFS-12705](https://issues.apache.org/jira/browse/HDFS-12705) | WebHdfsFileSystem exceptions should retain the caused by exception |  Major | hdfs | Daryn Sharp | Hanisha Koneru |
+| [YARN-7361](https://issues.apache.org/jira/browse/YARN-7361) | Improve the docker container runtime documentation |  Major | . | Shane Kumpf | Shane Kumpf |
+| [YARN-7469](https://issues.apache.org/jira/browse/YARN-7469) | Capacity Scheduler Intra-queue preemption: User can starve if newest app is exactly at user limit |  Major | capacity scheduler, yarn | Eric Payne | Eric Payne |
+| [YARN-7489](https://issues.apache.org/jira/browse/YARN-7489) | ConcurrentModificationException in RMAppImpl#getRMAppMetrics |  Major | capacityscheduler | Tao Yang | Tao Yang |
+| [YARN-7525](https://issues.apache.org/jira/browse/YARN-7525) | Incorrect query parameters in cluster nodes REST API document |  Minor | documentation | Tao Yang | Tao Yang |
+| [HADOOP-15045](https://issues.apache.org/jira/browse/HADOOP-15045) | ISA-L build options are documented in branch-2 |  Major | build, documentation | Akira Ajisaka | Akira Ajisaka |
+| [YARN-7390](https://issues.apache.org/jira/browse/YARN-7390) | All reservation related test cases failed when TestYarnClient runs against Fair Scheduler. |  Major | fairscheduler, reservation system | Yufei Gu | Yufei Gu |
+| [HDFS-12754](https://issues.apache.org/jira/browse/HDFS-12754) | Lease renewal can hit a deadlock |  Major | . | Kuhu Shukla | Kuhu Shukla |
+| [HDFS-12832](https://issues.apache.org/jira/browse/HDFS-12832) | INode.getFullPathName may throw ArrayIndexOutOfBoundsException lead to NameNode exit |  Critical | namenode | DENG FEI | Konstantin Shvachko |
+| [HDFS-11754](https://issues.apache.org/jira/browse/HDFS-11754) | Make FsServerDefaults cache configurable. |  Minor | . | Rushabh S Shah | Mikhail Erofeev |
+| [YARN-7509](https://issues.apache.org/jira/browse/YARN-7509) | AsyncScheduleThread and ResourceCommitterService are still running after RM is transitioned to standby |  Critical | . | Tao Yang | Tao Yang |
+| [YARN-7558](https://issues.apache.org/jira/browse/YARN-7558) | "yarn logs" command fails to get logs for running containers if UI authentication is enabled. |  Critical | . | Namit Maheshwari | Xuan Gong |
+| [HDFS-12638](https://issues.apache.org/jira/browse/HDFS-12638) | Delete copy-on-truncate block along with the original block, when deleting a file being truncated |  Blocker | hdfs | Jiandan Yang | Konstantin Shvachko |
+| [MAPREDUCE-5124](https://issues.apache.org/jira/browse/MAPREDUCE-5124) | AM lacks flow control for task events |  Major | mr-am | Jason Lowe | Peter Bacsko |
+| [YARN-7455](https://issues.apache.org/jira/browse/YARN-7455) | quote\_and\_append\_arg can overflow buffer |  Major | nodemanager | Jason Lowe | Jim Brennan |
+| [HADOOP-14985](https://issues.apache.org/jira/browse/HADOOP-14985) | Remove subversion related code from VersionInfoMojo.java |  Minor | build | Akira Ajisaka | Ajay Kumar |
+| [HDFS-12889](https://issues.apache.org/jira/browse/HDFS-12889) | Router UI is missing robots.txt file |  Major | . | Bharat Viswanadham | Bharat Viswanadham |
+| [HDFS-11576](https://issues.apache.org/jira/browse/HDFS-11576) | Block recovery will fail indefinitely if recovery time \> heartbeat interval |  Critical | datanode, hdfs, namenode | Lukas Majercak | Lukas Majercak |
+| [YARN-7607](https://issues.apache.org/jira/browse/YARN-7607) | Remove the trailing duplicated timestamp in container diagnostics message |  Minor | nodemanager | Weiwei Yang | Weiwei Yang |
+| [HADOOP-15080](https://issues.apache.org/jira/browse/HADOOP-15080) | Aliyun OSS: update oss sdk from 2.8.1 to 2.8.3 to remove its dependency on Cat-x "json-lib" |  Blocker | fs/oss | Chris Douglas | SammiChen |
+| [YARN-7591](https://issues.apache.org/jira/browse/YARN-7591) | NPE in async-scheduling mode of CapacityScheduler |  Critical | capacityscheduler | Tao Yang | Tao Yang |
+| [YARN-7608](https://issues.apache.org/jira/browse/YARN-7608) | Incorrect sTarget column causing DataTable warning on RM application and scheduler web page |  Major | resourcemanager, webapp | Weiwei Yang | Gergely Novák |
+| [HDFS-12833](https://issues.apache.org/jira/browse/HDFS-12833) | Distcp : Update the usage of delete option for dependency with update and overwrite option |  Minor | distcp, hdfs | Harshakiran Reddy | usharani |
+| [YARN-7647](https://issues.apache.org/jira/browse/YARN-7647) | NM print inappropriate error log when node-labels is enabled |  Minor | . | Yang Wang | Yang Wang |
+| [HDFS-12907](https://issues.apache.org/jira/browse/HDFS-12907) | Allow read-only access to reserved raw for non-superusers |  Major | namenode | Daryn Sharp | Rushabh S Shah |
+| [HDFS-12881](https://issues.apache.org/jira/browse/HDFS-12881) | Output streams closed with IOUtils suppressing write errors |  Major | . | Jason Lowe | Ajay Kumar |
+| [YARN-7595](https://issues.apache.org/jira/browse/YARN-7595) | Container launching code suppresses close exceptions after writes |  Major | nodemanager | Jason Lowe | Jim Brennan |
+| [HADOOP-15085](https://issues.apache.org/jira/browse/HADOOP-15085) | Output streams closed with IOUtils suppressing write errors |  Major | . | Jason Lowe | Jim Brennan |
+| [YARN-7661](https://issues.apache.org/jira/browse/YARN-7661) | NodeManager metrics return wrong value after update node resource |  Major | . | Yang Wang | Yang Wang |
+| [HDFS-12347](https://issues.apache.org/jira/browse/HDFS-12347) | TestBalancerRPCDelay#testBalancerRPCDelay fails very frequently |  Critical | test | Xiao Chen | Bharat Viswanadham |
+| [YARN-7542](https://issues.apache.org/jira/browse/YARN-7542) | Fix issue that causes some Running Opportunistic Containers to be recovered as PAUSED |  Major | . | Arun Suresh | Sampada Dehankar |
+| [HADOOP-15143](https://issues.apache.org/jira/browse/HADOOP-15143) | NPE due to Invalid KerberosTicket in UGI |  Major | . | Jitendra Nath Pandey | Mukul Kumar Singh |
+| [YARN-7692](https://issues.apache.org/jira/browse/YARN-7692) | Skip validating priority acls while recovering applications |  Blocker | resourcemanager | Charan Hebri | Sunil G |
+| [MAPREDUCE-7028](https://issues.apache.org/jira/browse/MAPREDUCE-7028) | Concurrent task progress updates causing NPE in Application Master |  Blocker | mr-am | Gergo Repas | Gergo Repas |
+| [YARN-7619](https://issues.apache.org/jira/browse/YARN-7619) | Max AM Resource value in Capacity Scheduler UI has to be refreshed for every user |  Major | capacity scheduler, yarn | Eric Payne | Eric Payne |
+| [YARN-7699](https://issues.apache.org/jira/browse/YARN-7699) | queueUsagePercentage is coming as INF for getApp REST api call |  Major | webapp | Sunil G | Sunil G |
+| [YARN-7508](https://issues.apache.org/jira/browse/YARN-7508) | NPE in FiCaSchedulerApp when debug log enabled in async-scheduling mode |  Major | capacityscheduler | Tao Yang | Tao Yang |
+| [YARN-7663](https://issues.apache.org/jira/browse/YARN-7663) | RMAppImpl:Invalid event: START at KILLED |  Minor | resourcemanager | lujie | lujie |
+| [YARN-6948](https://issues.apache.org/jira/browse/YARN-6948) | Invalid event: ATTEMPT\_ADDED at FINAL\_SAVING |  Minor | yarn | lujie | lujie |
+| [YARN-7735](https://issues.apache.org/jira/browse/YARN-7735) | Fix typo in YARN documentation |  Minor | documentation | Takanobu Asanuma | Takanobu Asanuma |
+| [YARN-7727](https://issues.apache.org/jira/browse/YARN-7727) | Incorrect log levels in few logs with QueuePriorityContainerCandidateSelector |  Minor | yarn | Prabhu Joseph | Prabhu Joseph |
+| [HDFS-11915](https://issues.apache.org/jira/browse/HDFS-11915) | Sync rbw dir on the first hsync() to avoid file lost on power failure |  Critical | . | Kanaka Kumar Avvaru | Vinayakumar B |
+| [HDFS-9049](https://issues.apache.org/jira/browse/HDFS-9049) | Make Datanode Netty reverse proxy port to be configurable |  Major | datanode | Vinayakumar B | Vinayakumar B |
+| [HADOOP-15150](https://issues.apache.org/jira/browse/HADOOP-15150) | in FsShell, UGI params should be overidden through env vars(-D arg) |  Major | . | Brahma Reddy Battula | Brahma Reddy Battula |
+| [HADOOP-15181](https://issues.apache.org/jira/browse/HADOOP-15181) | Typo in SecureMode.md |  Trivial | documentation | Masahiro Tanaka | Masahiro Tanaka |
+| [YARN-7737](https://issues.apache.org/jira/browse/YARN-7737) | prelaunch.err file not found exception on container failure |  Major | . | Jonathan Hung | Keqiu Hu |
+| [HDFS-13063](https://issues.apache.org/jira/browse/HDFS-13063) | Fix the incorrect spelling in HDFSHighAvailabilityWithQJM.md |  Trivial | documentation | Jianfei Jiang | Jianfei Jiang |
+| [YARN-7102](https://issues.apache.org/jira/browse/YARN-7102) | NM heartbeat stuck when responseId overflows MAX\_INT |  Critical | . | Botong Huang | Botong Huang |
+| [HADOOP-15151](https://issues.apache.org/jira/browse/HADOOP-15151) | MapFile.fix creates a wrong index file in case of block-compressed data file. |  Major | common | Grigori Rybkine | Grigori Rybkine |
+| [MAPREDUCE-7020](https://issues.apache.org/jira/browse/MAPREDUCE-7020) | Task timeout in uber mode can crash AM |  Major | mr-am | Akira Ajisaka | Peter Bacsko |
+| [YARN-7698](https://issues.apache.org/jira/browse/YARN-7698) | A misleading variable's name in ApplicationAttemptEventDispatcher |  Minor | resourcemanager | Jinjiang Ling | Jinjiang Ling |
+| [HDFS-13100](https://issues.apache.org/jira/browse/HDFS-13100) | Handle IllegalArgumentException when GETSERVERDEFAULTS is not implemented in webhdfs. |  Critical | hdfs, webhdfs | Yongjun Zhang | Yongjun Zhang |
+| [YARN-6868](https://issues.apache.org/jira/browse/YARN-6868) | Add test scope to certain entries in hadoop-yarn-server-resourcemanager pom.xml |  Major | yarn | Ray Chiang | Ray Chiang |
+| [YARN-7849](https://issues.apache.org/jira/browse/YARN-7849) | TestMiniYarnClusterNodeUtilization#testUpdateNodeUtilization fails due to heartbeat sync error |  Major | test | Jason Lowe | Botong Huang |
+| [YARN-7801](https://issues.apache.org/jira/browse/YARN-7801) | AmFilterInitializer should addFilter after fill all parameters |  Critical | . | Sumana Sathish | Wangda Tan |
+| [YARN-7890](https://issues.apache.org/jira/browse/YARN-7890) | NPE during container relaunch |  Major | . | Billie Rinaldi | Jason Lowe |
+| [HDFS-12935](https://issues.apache.org/jira/browse/HDFS-12935) | Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up |  Major | tools | Jianfei Jiang | Jianfei Jiang |
+| [HDFS-13120](https://issues.apache.org/jira/browse/HDFS-13120) | Snapshot diff could be corrupted after concat |  Major | namenode, snapshots | Xiaoyu Yao | Xiaoyu Yao |
+| [HDFS-10453](https://issues.apache.org/jira/browse/HDFS-10453) | ReplicationMonitor thread could stuck for long time due to the race between replication and delete of same file in a large cluster. |  Major | namenode | He Xiaoqiao | He Xiaoqiao |
+| [HDFS-8693](https://issues.apache.org/jira/browse/HDFS-8693) | refreshNamenodes does not support adding a new standby to a running DN |  Critical | datanode, ha | Jian Fang | Ajith S |
+| [MAPREDUCE-7052](https://issues.apache.org/jira/browse/MAPREDUCE-7052) | TestFixedLengthInputFormat#testFormatCompressedIn is flaky |  Major | client, test | Peter Bacsko | Peter Bacsko |
+| [HDFS-13112](https://issues.apache.org/jira/browse/HDFS-13112) | Token expiration edits may cause log corruption or deadlock |  Critical | namenode | Daryn Sharp | Daryn Sharp |
+| [MAPREDUCE-7053](https://issues.apache.org/jira/browse/MAPREDUCE-7053) | Timed out tasks can fail to produce thread dump |  Major | . | Jason Lowe | Jason Lowe |
+| [HADOOP-15206](https://issues.apache.org/jira/browse/HADOOP-15206) | BZip2 drops and duplicates records when input split size is small |  Major | . | Aki Tanaka | Aki Tanaka |
+| [YARN-7947](https://issues.apache.org/jira/browse/YARN-7947) | Capacity Scheduler intra-queue preemption can NPE for non-schedulable apps |  Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
+| [YARN-7945](https://issues.apache.org/jira/browse/YARN-7945) | Java Doc error in UnmanagedAMPoolManager for branch-2 |  Major | . | Rohith Sharma K S | Botong Huang |
+| [HADOOP-14903](https://issues.apache.org/jira/browse/HADOOP-14903) | Add json-smart explicitly to pom.xml |  Major | common | Ray Chiang | Ray Chiang |
+| [HDFS-12781](https://issues.apache.org/jira/browse/HDFS-12781) | After Datanode down, In Namenode UI Datanode tab is throwing warning message. |  Major | datanode | Harshakiran Reddy | Brahma Reddy Battula |
+| [HDFS-12070](https://issues.apache.org/jira/browse/HDFS-12070) | Failed block recovery leaves files open indefinitely and at risk for data loss |  Major | . | Daryn Sharp | Kihwal Lee |
+| [HADOOP-15251](https://issues.apache.org/jira/browse/HADOOP-15251) | Backport HADOOP-13514 (surefire upgrade) to branch-2 |  Major | test | Chris Douglas | Chris Douglas |
+| [HADOOP-15275](https://issues.apache.org/jira/browse/HADOOP-15275) | Incorrect javadoc for return type of RetryPolicy#shouldRetry |  Minor | documentation | Nanda kumar | Nanda kumar |
+| [YARN-7511](https://issues.apache.org/jira/browse/YARN-7511) | NPE in ContainerLocalizer when localization failed for running container |  Major | nodemanager | Tao Yang | Tao Yang |
+| [MAPREDUCE-7023](https://issues.apache.org/jira/browse/MAPREDUCE-7023) | TestHadoopArchiveLogs.testCheckFilesAndSeedApps fails on rerun |  Minor | test | Gergely Novák | Gergely Novák |
+| [HADOOP-15283](https://issues.apache.org/jira/browse/HADOOP-15283) | Upgrade from findbugs 3.0.1 to spotbugs 3.1.2 in branch-2 to fix docker image build |  Major | . | Xiao Chen | Akira Ajisaka |
+| [YARN-7736](https://issues.apache.org/jira/browse/YARN-7736) | Fix itemization in YARN federation document |  Minor | documentation | Akira Ajisaka | Sen Zhao |
+| [HDFS-13164](https://issues.apache.org/jira/browse/HDFS-13164) | File not closed if streamer fail with DSQuotaExceededException |  Major | hdfs-client | Xiao Chen | Xiao Chen |
+| [HDFS-13109](https://issues.apache.org/jira/browse/HDFS-13109) | Support fully qualified hdfs path in EZ commands |  Major | hdfs | Hanisha Koneru | Hanisha Koneru |
+| [MAPREDUCE-6930](https://issues.apache.org/jira/browse/MAPREDUCE-6930) | mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml |  Major | mrv2 | Daniel Templeton | Sen Zhao |
+| [HDFS-12156](https://issues.apache.org/jira/browse/HDFS-12156) | TestFSImage fails without -Pnative |  Major | test | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-15308](https://issues.apache.org/jira/browse/HADOOP-15308) | TestConfiguration fails on Windows because of paths |  Major | . | Íñigo Goiri | Xiao Liang |
+| [YARN-7636](https://issues.apache.org/jira/browse/YARN-7636) | Re-reservation count may overflow when cluster resource exhausted for a long time |  Major | capacityscheduler | Tao Yang | Tao Yang |
+| [HDFS-12886](https://issues.apache.org/jira/browse/HDFS-12886) | Ignore minReplication for block recovery |  Major | hdfs, namenode | Lukas Majercak | Lukas Majercak |
+| [HDFS-13296](https://issues.apache.org/jira/browse/HDFS-13296) | GenericTestUtils generates paths with drive letter in Windows and fail webhdfs related test cases |  Major | . | Xiao Liang | Xiao Liang |
+| [HDFS-13268](https://issues.apache.org/jira/browse/HDFS-13268) | TestWebHdfsFileContextMainOperations fails on Windows |  Major | . | Íñigo Goiri | Xiao Liang |
+| [YARN-8054](https://issues.apache.org/jira/browse/YARN-8054) | Improve robustness of the LocalDirsHandlerService MonitoringTimerTask thread |  Major | . | Jonathan Eagles | Jonathan Eagles |
+| [YARN-7873](https://issues.apache.org/jira/browse/YARN-7873) | Revert YARN-6078 |  Blocker | . | Billie Rinaldi | Billie Rinaldi |
+| [HDFS-13195](https://issues.apache.org/jira/browse/HDFS-13195) | DataNode conf page  cannot display the current value after reconfig |  Minor | datanode | maobaolong | maobaolong |
+| [HADOOP-15320](https://issues.apache.org/jira/browse/HADOOP-15320) | Remove customized getFileBlockLocations for hadoop-azure and hadoop-azure-datalake |  Major | fs/adl, fs/azure | shanyu zhao | shanyu zhao |
+| [HADOOP-12862](https://issues.apache.org/jira/browse/HADOOP-12862) | LDAP Group Mapping over SSL can not specify trust store |  Major | . | Wei-Chiu Chuang | Wei-Chiu Chuang |
+| [HDFS-13427](https://issues.apache.org/jira/browse/HDFS-13427) | Fix the section titles of transparent encryption document |  Minor | documentation | Akira Ajisaka | Akira Ajisaka |
+
+
+### TESTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14696](https://issues.apache.org/jira/browse/HADOOP-14696) | parallel tests don't work for Windows |  Minor | test | Allen Wittenauer | Allen Wittenauer |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-13481](https://issues.apache.org/jira/browse/HADOOP-13481) | User end documents for Aliyun OSS FileSystem |  Minor | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-13591](https://issues.apache.org/jira/browse/HADOOP-13591) | Unit test failure in TestOSSContractGetFileStatus and TestOSSContractRootDir |  Major | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-13624](https://issues.apache.org/jira/browse/HADOOP-13624) | Rename TestAliyunOSSContractDispCp |  Major | fs, fs/oss | Kai Zheng | Genmao Yu |
+| [HADOOP-14065](https://issues.apache.org/jira/browse/HADOOP-14065) | AliyunOSS: oss directory filestatus should use meta time |  Major | fs/oss | Fei Hui | Fei Hui |
+| [HADOOP-13768](https://issues.apache.org/jira/browse/HADOOP-13768) | AliyunOSS: handle the failure in the batch delete operation `deleteDirs`. |  Major | fs | Genmao Yu | Genmao Yu |
+| [HADOOP-14069](https://issues.apache.org/jira/browse/HADOOP-14069) | AliyunOSS: listStatus returns wrong file info |  Major | fs/oss | Fei Hui | Fei Hui |
+| [HADOOP-13769](https://issues.apache.org/jira/browse/HADOOP-13769) | AliyunOSS: update oss sdk version |  Major | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-14072](https://issues.apache.org/jira/browse/HADOOP-14072) | AliyunOSS: Failed to read from stream when seek beyond the download size |  Major | fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-14192](https://issues.apache.org/jira/browse/HADOOP-14192) | Aliyun OSS FileSystem contract test should implement getTestBaseDir() |  Major | fs/oss | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14194](https://issues.apache.org/jira/browse/HADOOP-14194) | Aliyun OSS should not use empty endpoint as default |  Major | fs/oss | Mingliang Liu | Genmao Yu |
+| [HADOOP-14787](https://issues.apache.org/jira/browse/HADOOP-14787) | AliyunOSS: Implement the `createNonRecursive` operator |  Major | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-14649](https://issues.apache.org/jira/browse/HADOOP-14649) | Update aliyun-sdk-oss version to 2.8.1 |  Major | fs/oss | Ray Chiang | Genmao Yu |
+| [HADOOP-14799](https://issues.apache.org/jira/browse/HADOOP-14799) | Update nimbus-jose-jwt to 4.41.1 |  Major | . | Ray Chiang | Ray Chiang |
+| [HADOOP-14997](https://issues.apache.org/jira/browse/HADOOP-14997) |  Add hadoop-aliyun as dependency of hadoop-cloud-storage |  Minor | fs/oss | Genmao Yu | Genmao Yu |
+| [HDFS-12801](https://issues.apache.org/jira/browse/HDFS-12801) | RBF: Set MountTableResolver as default file resolver |  Minor | . | Íñigo Goiri | Íñigo Goiri |
+| [YARN-7430](https://issues.apache.org/jira/browse/YARN-7430) | Enable user re-mapping for Docker containers by default |  Blocker | security, yarn | Eric Yang | Eric Yang |
+| [YARN-6128](https://issues.apache.org/jira/browse/YARN-6128) | Add support for AMRMProxy HA |  Major | amrmproxy, nodemanager | Subru Krishnan | Botong Huang |
+| [HADOOP-15024](https://issues.apache.org/jira/browse/HADOOP-15024) | AliyunOSS: support user agent configuration and include that & Hadoop version information to oss server |  Major | fs, fs/oss | SammiChen | SammiChen |
+| [HDFS-12858](https://issues.apache.org/jira/browse/HDFS-12858) | RBF: Add router admin commands usage in HDFS commands reference doc |  Minor | documentation | Yiqun Lin | Yiqun Lin |
+| [HDFS-12835](https://issues.apache.org/jira/browse/HDFS-12835) | RBF: Fix Javadoc parameter errors |  Minor | . | Wei Yan | Wei Yan |
+| [YARN-7587](https://issues.apache.org/jira/browse/YARN-7587) | Skip dispatching opportunistic containers to nodes whose queue is already full |  Major | . | Weiwei Yang | Weiwei Yang |
+| [HDFS-12396](https://issues.apache.org/jira/browse/HDFS-12396) | Webhdfs file system should get delegation token from kms provider. |  Major | encryption, kms, webhdfs | Rushabh S Shah | Rushabh S Shah |
+| [YARN-6704](https://issues.apache.org/jira/browse/YARN-6704) | Add support for work preserving NM restart when FederationInterceptor is enabled in AMRMProxyService |  Major | . | Botong Huang | Botong Huang |
+| [HDFS-12875](https://issues.apache.org/jira/browse/HDFS-12875) | RBF: Complete logic for -readonly option of dfsrouteradmin add command |  Major | . | Yiqun Lin | Íñigo Goiri |
+| [YARN-7630](https://issues.apache.org/jira/browse/YARN-7630) | Fix AMRMToken rollover handling in AMRMProxy |  Minor | . | Botong Huang | Botong Huang |
+| [HDFS-12937](https://issues.apache.org/jira/browse/HDFS-12937) | RBF: Add more unit tests for router admin commands |  Major | test | Yiqun Lin | Yiqun Lin |
+| [HDFS-12988](https://issues.apache.org/jira/browse/HDFS-12988) | RBF: Mount table entries not properly updated in the local cache |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-15156](https://issues.apache.org/jira/browse/HADOOP-15156) | backport HADOOP-15086 rename fix to branch-2 |  Major | fs/azure | Thomas Marquardt | Thomas Marquardt |
+| [YARN-7716](https://issues.apache.org/jira/browse/YARN-7716) | metricsTimeStart and metricsTimeEnd should be all lower case in the doc |  Major | timelinereader | Haibo Chen | Haibo Chen |
+| [HDFS-12802](https://issues.apache.org/jira/browse/HDFS-12802) | RBF: Control MountTableResolver cache size |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-15027](https://issues.apache.org/jira/browse/HADOOP-15027) | AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance |  Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-13028](https://issues.apache.org/jira/browse/HDFS-13028) | RBF: Fix spurious TestRouterRpc#testProxyGetStats |  Minor | . | Íñigo Goiri | Íñigo Goiri |
+| [YARN-5094](https://issues.apache.org/jira/browse/YARN-5094) | some YARN container events have timestamp of -1 |  Critical | . | Sangjin Lee | Haibo Chen |
+| [YARN-7782](https://issues.apache.org/jira/browse/YARN-7782) | Enable user re-mapping for Docker containers in yarn-default.xml |  Blocker | security, yarn | Eric Yang | Eric Yang |
+| [HDFS-12772](https://issues.apache.org/jira/browse/HDFS-12772) | RBF: Federation Router State State Store internal API |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13042](https://issues.apache.org/jira/browse/HDFS-13042) | RBF: Heartbeat Router State |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13049](https://issues.apache.org/jira/browse/HDFS-13049) | RBF: Inconsistent Router OPTS config in branch-2 and branch-3 |  Minor | . | Wei Yan | Wei Yan |
+| [HDFS-12574](https://issues.apache.org/jira/browse/HDFS-12574) | Add CryptoInputStream to WebHdfsFileSystem read call. |  Major | encryption, kms, webhdfs | Rushabh S Shah | Rushabh S Shah |
+| [HDFS-13044](https://issues.apache.org/jira/browse/HDFS-13044) | RBF: Add a safe mode for the Router |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13043](https://issues.apache.org/jira/browse/HDFS-13043) | RBF: Expose the state of the Routers in the federation |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13068](https://issues.apache.org/jira/browse/HDFS-13068) | RBF: Add router admin option to manage safe mode |  Major | . | Íñigo Goiri | Yiqun Lin |
+| [HDFS-13119](https://issues.apache.org/jira/browse/HDFS-13119) | RBF: Manage unavailable clusters |  Major | . | Íñigo Goiri | Yiqun Lin |
+| [HDFS-13187](https://issues.apache.org/jira/browse/HDFS-13187) | RBF: Fix Routers information shown in the web UI |  Minor | . | Wei Yan | Wei Yan |
+| [HDFS-13184](https://issues.apache.org/jira/browse/HDFS-13184) | RBF: Improve the unit test TestRouterRPCClientRetries |  Minor | test | Yiqun Lin | Yiqun Lin |
+| [HDFS-13199](https://issues.apache.org/jira/browse/HDFS-13199) | RBF: Fix the hdfs router page missing label icon issue |  Major | federation, hdfs | maobaolong | maobaolong |
+| [HDFS-13214](https://issues.apache.org/jira/browse/HDFS-13214) | RBF: Complete document of Router configuration |  Major | . | Tao Jie | Yiqun Lin |
+| [HDFS-13230](https://issues.apache.org/jira/browse/HDFS-13230) | RBF: ConnectionManager's cleanup task will compare each pool's own active conns with its total conns |  Minor | . | Wei Yan | Chao Sun |
+| [HDFS-13233](https://issues.apache.org/jira/browse/HDFS-13233) | RBF: MountTableResolver doesn't return the correct mount point of the given path |  Major | hdfs | wangzhiyuan | wangzhiyuan |
+| [HDFS-13212](https://issues.apache.org/jira/browse/HDFS-13212) | RBF: Fix router location cache issue |  Major | federation, hdfs | Weiwei Wu | Weiwei Wu |
+| [HDFS-13232](https://issues.apache.org/jira/browse/HDFS-13232) | RBF: ConnectionPool should return first usable connection |  Minor | . | Wei Yan | Ekanth S |
+| [HDFS-13240](https://issues.apache.org/jira/browse/HDFS-13240) | RBF: Update some inaccurate document descriptions |  Minor | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-11399](https://issues.apache.org/jira/browse/HDFS-11399) | Many tests fails in Windows due to injecting disk failures |  Major | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-13241](https://issues.apache.org/jira/browse/HDFS-13241) | RBF: TestRouterSafemode failed if the port 8888 is in use |  Major | hdfs, test | maobaolong | maobaolong |
+| [HDFS-13253](https://issues.apache.org/jira/browse/HDFS-13253) | RBF: Quota management incorrect parent-child relationship judgement |  Major | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-13226](https://issues.apache.org/jira/browse/HDFS-13226) | RBF: Throw the exception if mount table entry validated failed |  Major | hdfs | maobaolong | maobaolong |
+| [HDFS-12773](https://issues.apache.org/jira/browse/HDFS-12773) | RBF: Improve State Store FS implementation |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13198](https://issues.apache.org/jira/browse/HDFS-13198) | RBF: RouterHeartbeatService throws out CachedStateStore related exceptions when starting router |  Minor | . | Wei Yan | Wei Yan |
+| [HDFS-13224](https://issues.apache.org/jira/browse/HDFS-13224) | RBF: Resolvers to support mount points across multiple subclusters |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-15262](https://issues.apache.org/jira/browse/HADOOP-15262) | AliyunOSS: move files under a directory in parallel when rename a directory |  Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-13215](https://issues.apache.org/jira/browse/HDFS-13215) | RBF: Move Router to its own module |  Major | . | Íñigo Goiri | Wei Yan |
+| [HDFS-13250](https://issues.apache.org/jira/browse/HDFS-13250) | RBF: Router to manage requests across multiple subclusters |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13318](https://issues.apache.org/jira/browse/HDFS-13318) | RBF: Fix FindBugs in hadoop-hdfs-rbf |  Minor | . | Íñigo Goiri | Ekanth S |
+| [HDFS-12792](https://issues.apache.org/jira/browse/HDFS-12792) | RBF: Test Router-based federation using HDFSContract |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-12512](https://issues.apache.org/jira/browse/HDFS-12512) | RBF: Add WebHDFS |  Major | fs | Íñigo Goiri | Wei Yan |
+| [HDFS-13291](https://issues.apache.org/jira/browse/HDFS-13291) | RBF: Implement available space based OrderResolver |  Major | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-13204](https://issues.apache.org/jira/browse/HDFS-13204) | RBF: Optimize name service safe mode icon |  Minor | . | liuhongtong | liuhongtong |
+| [HDFS-13352](https://issues.apache.org/jira/browse/HDFS-13352) | RBF: Add xsl stylesheet for hdfs-rbf-default.xml |  Major | documentation | Takanobu Asanuma | Takanobu Asanuma |
+| [YARN-8010](https://issues.apache.org/jira/browse/YARN-8010) | Add config in FederationRMFailoverProxy to not bypass facade cache when failing over |  Minor | . | Botong Huang | Botong Huang |
+| [HDFS-13347](https://issues.apache.org/jira/browse/HDFS-13347) | RBF: Cache datanode reports |  Minor | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13289](https://issues.apache.org/jira/browse/HDFS-13289) | RBF: TestConnectionManager#testCleanup() test case need correction |  Minor | . | Dibyendu Karmakar | Dibyendu Karmakar |
+| [HDFS-13364](https://issues.apache.org/jira/browse/HDFS-13364) | RBF: Support NamenodeProtocol in the Router |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-14651](https://issues.apache.org/jira/browse/HADOOP-14651) | Update okhttp version to 2.7.5 |  Major | fs/adl | Ray Chiang | Ray Chiang |
+| [HADOOP-14999](https://issues.apache.org/jira/browse/HADOOP-14999) | AliyunOSS: provide one asynchronous multi-part based uploading mechanism |  Major | fs/oss | Genmao Yu | Genmao Yu |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-15149](https://issues.apache.org/jira/browse/HADOOP-15149) | CryptoOutputStream should implement StreamCapabilities |  Major | fs | Mike Drob | Xiao Chen |
+| [YARN-7691](https://issues.apache.org/jira/browse/YARN-7691) | Add Unit Tests for ContainersLauncher |  Major | . | Sampada Dehankar | Sampada Dehankar |
+| [HADOOP-15177](https://issues.apache.org/jira/browse/HADOOP-15177) | Update the release year to 2018 |  Blocker | build | Akira Ajisaka | Bharat Viswanadham |
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7192749c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
new file mode 100644
index 0000000..bed70b1
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
@@ -0,0 +1,88 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# "Apache Hadoop"  2.9.1 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-12756](https://issues.apache.org/jira/browse/HADOOP-12756) | *Major* | **Incorporate Aliyun OSS file system implementation**
+
+Aliyun OSS is widely used among China's cloud users, and this work implemented a new Hadoop-compatible filesystem, AliyunOSSFileSystem, with the oss:// scheme, similar to the s3a and azure support.
+
+
+---
+
+* [HADOOP-14964](https://issues.apache.org/jira/browse/HADOOP-14964) | *Major* | **AliyunOSS: backport Aliyun OSS module to branch-2**
+
+Aliyun OSS is widely used among China's cloud users, and this work implemented a new Hadoop-compatible filesystem, AliyunOSSFileSystem, with the oss:// scheme, similar to the s3a and azure support; a brief usage sketch follows.
+
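+A minimal usage sketch of the oss:// scheme (the bucket, endpoint, and
+credential values below are placeholders, and the fs.oss.* keys are assumed
+from the hadoop-aliyun module; verify them against its documentation):
+
+    import java.net.URI;
+    import org.apache.hadoop.conf.Configuration;
+    import org.apache.hadoop.fs.FSDataInputStream;
+    import org.apache.hadoop.fs.FileSystem;
+    import org.apache.hadoop.fs.Path;
+
+    public class OssReadSketch {
+      public static void main(String[] args) throws Exception {
+        Configuration conf = new Configuration();
+        // Endpoint and credentials are illustrative placeholders.
+        conf.set("fs.oss.endpoint", "oss-cn-hangzhou.aliyuncs.com");
+        conf.set("fs.oss.accessKeyId", "<access-key-id>");
+        conf.set("fs.oss.accessKeySecret", "<access-key-secret>");
+        FileSystem fs = FileSystem.get(URI.create("oss://my-bucket/"), conf);
+        // Reads behave like any other Hadoop FileSystem stream.
+        try (FSDataInputStream in = fs.open(new Path("/data/part-00000"))) {
+          System.out.println("first byte: " + in.read());
+        }
+      }
+    }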
+
+---
+
+* [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | *Major* | **RBF: Document Router and State Store metrics**
+
+This JIRA makes the following change:
+Change the Router metrics context from 'router' to 'dfs'.
+
+
+---
+
+* [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | *Major* | **RBF: Add ACL support for mount table**
+
+Mount table entries now support ACLs. Entries created before this change are treated as having owner:superuser, group:supergroup, and permission 755 by default, so ordinary users will not be able to modify the entries they previously created; the workaround is to log in as the superuser to modify those mount table entries.
+
+
+---
+
+* [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | *Major* | **Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath**
+
+Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath.
+
+
+---
+
+* [HADOOP-15156](https://issues.apache.org/jira/browse/HADOOP-15156) | *Major* | **backport HADOOP-15086 rename fix to branch-2**
+
+[WASB] Fix Azure implementation of Filesystem.rename to ensure that at most one operation succeeds when there are multiple, concurrent rename operations targeting the same destination file.
+
+
+---
+
+* [HADOOP-15027](https://issues.apache.org/jira/browse/HADOOP-15027) | *Major* | **AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance**
+
+Support multi-thread pre-read in AliyunOSSInputStream to improve the sequential read performance from Hadoop to Aliyun OSS.
+
+
+---
+
+* [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | *Major* | **RBF: Fix doc error setting up client**
+
+Fix a documentation error in setting up HDFS Router-based Federation clients.
+
+
+---
+
+* [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | *Minor* | **RBF: Use the ZooKeeper as the default State Store**
+
+Change the default State Store from the local file system to ZooKeeper. This additionally requires a ZooKeeper address to be configured; a configuration sketch follows.
+
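+A minimal sketch of the resulting Router configuration, expressed
+programmatically (the driver class and the hadoop.zk.address key are assumed
+from hdfs-rbf-default.xml; verify both names against your release):
+
+    import org.apache.hadoop.conf.Configuration;
+
+    public class RouterZkStoreConf {
+      public static Configuration routerConf() {
+        Configuration conf = new Configuration();
+        // Pin the ZooKeeper-backed State Store driver (the new default).
+        conf.set("dfs.federation.router.store.driver.class",
+            "org.apache.hadoop.hdfs.server.federation.store.driver.impl."
+                + "StateStoreZooKeeperImpl");
+        // The Router resolves the ensemble through the shared Hadoop ZK key.
+        conf.set("hadoop.zk.address", "zk1:2181,zk2:2181,zk3:2181");
+        return conf;
+      }
+    }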
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7192749c/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
new file mode 100644
index 0000000..a5d87c7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
@@ -0,0 +1,312 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Mon Apr 16 12:03:07 UTC 2018 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop HDFS 2.9.1"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.9.1.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.9.1.jar:/maven/org/slf4j/slf4j-api/1.7.25/slf4j-api-1.7.25.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/4.41.1/nimbus-jose-jwt-4.41.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/1.3.1/json-smart-1.3.1.jar:/maven/org/apache/directory/serv
 er/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/jline/jline/0.9.94/jline-0.9.94.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.9.1.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus
 /jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/org/apache/commons/commons-lang3/3.4/commons-lang3-3.4.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/com/thoug
 htworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.5/snappy-java-1.0.5.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/org/codehaus/woodstox/stax2-api/3.1.4/stax2-api-3.1.4.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.0.3/woodstox-core-5.0.3.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-2.9.1.jar:/maven/com/squareup/okhttp/okhttp/2.7.5/okhttp-2.7.5.jar:/maven/com/squareup/okio/okio/1.6.0/okio-1.6.0.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortb
 ay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/slf4j/slf4j-log4j12/1.7.25/slf4j-log4j12-1.7.25.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/xmlenc/xmlenc/0.52/xmlenc
 -0.52.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/io/netty/netty-all/4.0.23.Final/netty-all-4.0.23.Final.jar:/maven/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/maven/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/maven/org/apache/htrace/htrace-core4/4.1.0-incubating/htrace-core4-4.1.0-incubating.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.7.8/jackson-databind-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.7.8/jackson-annotations-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.7.8/jackson-core-2.7.8.jar -sourcepath /build/source/hadoop-hdfs-project/hadoop-hdfs/src/main/java -apidir /build/source/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 2.9.1 -->
+<package name="org.apache.hadoop.hdfs">
+  <doc>
+  <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
+Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files 
+have strictly one writer at any one time.  Bytes are always appended 
+to the end of the writer's stream.  There is no notion of "record appends"
+or "mutations" that are then checked or reordered.  Writers simply emit 
+a byte stream.  That byte stream is guaranteed to be stored in the 
+order written.</p>]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.net">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
+</package>
+<package name="org.apache.hadoop.hdfs.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.client">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.server">
+  <!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+  <interface name="JournalNodeMXBean"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getJournalsStatus" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get status information (e.g., whether formatted) of JournalNode's journals.
+ 
+ @return A string presenting status for each journal]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the JMX management interface for JournalNode information]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.block">
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.delegation">
+</package>
+<package name="org.apache.hadoop.hdfs.server.balancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.blockmanagement">
+</package>
+<package name="org.apache.hadoop.hdfs.server.common">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
+</package>
+<package name="org.apache.hadoop.hdfs.server.mover">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode">
+  <!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <interface name="AuditLogger"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Called during initialization of the logger.
+
+ @param conf The configuration object.]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <doc>
+      <![CDATA[Called to log an audit event.
+ <p>
+ This method must return as quickly as possible, since it's called
+ in a critical section of the NameNode's operation.
+
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's
+             metadata (permissions, owner, times, etc).]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface defining an audit logger.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <class name="HdfsAuditLogger" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
+    <constructor name="HdfsAuditLogger"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+    </method>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String, String,
+ FileStatus)} with additional parameters related to logging delegation token
+ tracking IDs.
+ 
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's metadata
+          (permissions, owner, times, etc).
+ @param callerContext Context information of the caller
+ @param ugi UserGroupInformation of the current user, or null if not logging
+          token tracking information
+ @param dtSecretManager The token secret manager, or null if not logging
+          token tracking information]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String,
+ String, FileStatus, CallerContext, UserGroupInformation,
+ DelegationTokenSecretManager)} without {@link CallerContext} information.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Extension of {@link AuditLogger}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+  <class name="INodeAttributeProvider" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="INodeAttributeProvider"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="start"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Initialize the provider. This method is called at NameNode startup
+ time.]]>
+      </doc>
+    </method>
+    <method name="stop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Shutdown the provider. This method is called at NameNode shutdown time.]]>
+      </doc>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fullPath" type="java.lang.String"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathElements" type="java.lang.String[]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="components" type="byte[][]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
+      <doc>
+      <![CDATA[Can be over-ridden by implementations to provide a custom Access Control
+ Enforcer that can provide an alternate implementation of the
+ default permission checking logic.
+ @param defaultEnforcer The Default AccessControlEnforcer
+ @return The AccessControlEnforcer to use]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.ha">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
+</package>
+<package name="org.apache.hadoop.hdfs.server.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.tools">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.util">
+</package>
+<package name="org.apache.hadoop.hdfs.web">
+</package>
+<package name="org.apache.hadoop.hdfs.web.resources">
+</package>
+
+</api>

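The jdiff listing above freezes the 2.9.1 public surface of
org.apache.hadoop.hdfs.server.namenode.AuditLogger. As a minimal sketch of a
plugin written against that interface (the logger name and message format are
illustrative, and wiring it up via dfs.namenode.audit.loggers is an assumption
to verify for your deployment):

    import java.net.InetAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.hdfs.server.namenode.AuditLogger;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class SketchAuditLogger implements AuditLogger {
      private static final Logger LOG =
          LoggerFactory.getLogger("hdfs.audit.sketch");

      @Override
      public void initialize(Configuration conf) {
        // Called once at NameNode startup; read any custom keys here.
      }

      @Override
      public void logAuditEvent(boolean succeeded, String userName,
          InetAddress addr, String cmd, String src, String dst,
          FileStatus stat) {
        // Per the javadoc above, this runs in a critical section of the
        // NameNode, so format and hand off quickly; no blocking I/O.
        LOG.info("allowed={} ugi={} ip={} cmd={} src={} dst={}",
            succeeded, userName, addr, cmd, src, dst);
      }
    }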
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7192749c/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index cfaa698..5f83da3 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -145,7 +145,7 @@
         <activeByDefault>false</activeByDefault>
       </activation>
       <properties>
-        <jdiff.stable.api>3.0.2</jdiff.stable.api>
+        <jdiff.stable.api>2.9.1</jdiff.stable.api>
         <jdiff.stability>-unstable</jdiff.stability>
         <!-- Commented out for HADOOP-11776 -->
         <!-- Uncomment param name="${jdiff.compatibility}" in javadoc doclet if compatibility is not empty -->



[19/50] [abbrv] hadoop git commit: YARN-7003. DRAINING state of queues is not recovered after RM restart. Contributed by Tao Yang.

Posted by xy...@apache.org.
YARN-7003. DRAINING state of queues is not recovered after RM restart. Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9db20b3c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9db20b3c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9db20b3c

Branch: refs/heads/HDDS-4
Commit: 9db20b3cdf9ca7344a30245d6f81ea84d4452840
Parents: 082bcd4
Author: Weiwei Yang <ww...@apache.org>
Authored: Fri May 11 10:47:04 2018 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../scheduler/capacity/AbstractCSQueue.java     | 15 +++++
 .../scheduler/capacity/CapacityScheduler.java   |  7 +++
 .../scheduler/capacity/TestQueueState.java      | 60 ++++++++++++++++++++
 3 files changed, 82 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9db20b3c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 651d0e9..67b676b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -1244,4 +1244,19 @@ public abstract class AbstractCSQueue implements CSQueue {
   public Map<String, Float> getUserWeights() {
     return userWeights;
   }
+
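+  /**
+   * Restore the DRAINING state lost across an RM restart. A queue that is
+   * STOPPED but still recovering applications must previously have been
+   * DRAINING, so transition it, and any stopped ancestors, back to DRAINING.
+   */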
+  public void recoverDrainingState() {
+    try {
+      this.writeLock.lock();
+      if (getState() == QueueState.STOPPED) {
+        updateQueueState(QueueState.DRAINING);
+      }
+      LOG.info("Recover draining state for queue " + this.getQueuePath());
+      if (getParent() != null && getParent().getState() == QueueState.STOPPED) {
+        ((AbstractCSQueue) getParent()).recoverDrainingState();
+      }
+    } finally {
+      this.writeLock.unlock();
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9db20b3c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 1d6c104..162d3bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -808,6 +809,12 @@ public class CapacityScheduler extends
           throw new QueueInvalidException(queueErrorMsg);
         }
       }
+      // If apps are being recovered into this queue while it is in the
+      // STOPPED state, its pre-restart state must have been DRAINING, so
+      // automatically transition it back to DRAINING during recovery.
+      if (queue.getState() == QueueState.STOPPED) {
+        ((LeafQueue) queue).recoverDrainingState();
+      }
       // Submit to the queue
       try {
         queue.submitApplication(applicationId, user, queueName);

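For readers skimming the patch, the recovery rule is easiest to see in isolation: a queue found STOPPED while apps are still being recovered into it must have been DRAINING before the restart, and the transition is propagated to any STOPPED ancestors. Below is a minimal, dependency-free sketch of that walk; Node and State are illustrative stand-ins, not YARN classes, and the write lock from the real patch is omitted for brevity.

public class DrainingRecoverySketch {
  enum State { RUNNING, STOPPED, DRAINING }

  static class Node {
    State state = State.RUNNING;
    Node parent;

    // Mirrors AbstractCSQueue#recoverDrainingState above: flip STOPPED to
    // DRAINING, then repeat for a STOPPED parent.
    void recoverDrainingState() {
      if (state == State.STOPPED) {
        state = State.DRAINING;
      }
      if (parent != null && parent.state == State.STOPPED) {
        parent.recoverDrainingState();
      }
    }
  }

  public static void main(String[] args) {
    Node root = new Node();
    Node leaf = new Node();
    leaf.parent = root;
    root.state = State.STOPPED;   // hierarchy was stopped while apps ran,
    leaf.state = State.STOPPED;   // i.e. its pre-restart state was DRAINING
    leaf.recoverDrainingState();  // invoked while recovering an app in `leaf`
    System.out.println(leaf.state + " / " + root.state); // DRAINING / DRAINING
  }
}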
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9db20b3c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java
index 9f2933e..0a39e99 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java
@@ -32,7 +32,12 @@ import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
@@ -197,4 +202,59 @@ public class TestQueueState {
         .thenCallRealMethod();
     return application;
   }
+
+  @Test (timeout = 30000)
+  public void testRecoverDrainingStateAfterRMRestart() throws Exception {
+    // init conf
+    CapacitySchedulerConfiguration newConf =
+        new CapacitySchedulerConfiguration();
+    newConf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    newConf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED,
+        false);
+    newConf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    newConf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 1);
+    newConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[]{Q1});
+    newConf.setQueues(Q1_PATH, new String[]{Q2});
+    newConf.setCapacity(Q1_PATH, 100);
+    newConf.setCapacity(Q2_PATH, 100);
+
+    // init state store
+    MemoryRMStateStore newMemStore = new MemoryRMStateStore();
+    newMemStore.init(newConf);
+    // init RM & NMs & Nodes
+    MockRM rm = new MockRM(newConf, newMemStore);
+    rm.start();
+    MockNM nm = rm.registerNode("h1:1234", 204800);
+
+    // submit an app, AM is running on nm1
+    RMApp app = rm.submitApp(1024, "appname", "appuser", null, Q2);
+    MockRM.launchAM(app, rm, nm);
+    rm.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
+    // update queue state to STOPPED
+    newConf.setState(Q1_PATH, QueueState.STOPPED);
+    CapacityScheduler capacityScheduler =
+        (CapacityScheduler) rm.getRMContext().getScheduler();
+    capacityScheduler.reinitialize(newConf, rm.getRMContext());
+    // current queue state should be DRAINING
+    Assert.assertEquals(QueueState.DRAINING,
+        capacityScheduler.getQueue(Q2).getState());
+    Assert.assertEquals(QueueState.DRAINING,
+        capacityScheduler.getQueue(Q1).getState());
+
+    // RM restart
+    rm = new MockRM(newConf, newMemStore);
+    rm.start();
+    rm.registerNode("h1:1234", 204800);
+
+    // queue state should be DRAINING after app recovered
+    rm.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
+    capacityScheduler = (CapacityScheduler) rm.getRMContext().getScheduler();
+    Assert.assertEquals(QueueState.DRAINING,
+        capacityScheduler.getQueue(Q2).getState());
+    Assert.assertEquals(QueueState.DRAINING,
+        capacityScheduler.getQueue(Q1).getState());
+
+    // close rm
+    rm.close();
+  }
 }




[47/50] [abbrv] hadoop git commit: HDDS-29. Fix TestStorageContainerManager#testRpcPermission. Contributed by Mukul Kumar Singh.

Posted by xy...@apache.org.
HDDS-29. Fix TestStorageContainerManager#testRpcPermission. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2358e7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2358e7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2358e7c

Branch: refs/heads/HDDS-4
Commit: f2358e7ced1367adfca74fdb02756a107e047ec8
Parents: 2af3970
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Mon May 14 09:09:25 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:10 2018 -0700

----------------------------------------------------------------------
 .../scm/server/SCMClientProtocolServer.java     | 15 +++++++++--
 .../scm/server/StorageContainerManager.java     |  9 +------
 .../ozone/TestStorageContainerManager.java      | 27 ++++++++++----------
 3 files changed, 28 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2358e7c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 246d053..d73cccd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ozone.protocolPB
     .StorageContainerLocationProtocolServerSideTranslatorPB;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -137,17 +138,26 @@ public class SCMClientProtocolServer implements
     getClientRpcServer().join();
   }
 
+  @VisibleForTesting
+  public String getRpcRemoteUsername() {
+    UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser();
+    return user == null ? null : user.getUserName();
+  }
+
   @Override
   public ContainerInfo allocateContainer(HddsProtos.ReplicationType
       replicationType, HddsProtos.ReplicationFactor factor,
       String owner) throws IOException {
-    getScm().checkAdminAccess();
+    String remoteUser = getRpcRemoteUsername();
+    getScm().checkAdminAccess(remoteUser);
     return scm.getScmContainerManager()
         .allocateContainer(replicationType, factor, owner);
   }
 
   @Override
   public ContainerInfo getContainer(long containerID) throws IOException {
+    String remoteUser = getRpcRemoteUsername();
+    getScm().checkAdminAccess(remoteUser);
     return scm.getScmContainerManager()
         .getContainer(containerID);
   }
@@ -161,7 +171,8 @@ public class SCMClientProtocolServer implements
 
   @Override
   public void deleteContainer(long containerID) throws IOException {
-    getScm().checkAdminAccess();
+    String remoteUser = getRpcRemoteUsername();
+    getScm().checkAdminAccess(remoteUser);
     scm.getScmContainerManager().deleteContainer(containerID);
 
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2358e7c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index a235976..21c797d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -663,14 +663,7 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl
     return scmBlockManager;
   }
 
-  @VisibleForTesting
-  public String getPpcRemoteUsername() {
-    UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser();
-    return user == null ? null : user.getUserName();
-  }
-
-  public void checkAdminAccess() throws IOException {
-    String remoteUser = getPpcRemoteUsername();
+  public void checkAdminAccess(String remoteUser) throws IOException {
     if (remoteUser != null) {
       if (!scmAdminUsernames.contains(remoteUser)) {
         throw new IOException(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2358e7c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 140c8b2..8e8df7a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -23,6 +23,7 @@ import java.io.IOException;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
@@ -108,19 +109,19 @@ public class TestStorageContainerManager {
     MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(ozoneConf).build();
     cluster.waitForClusterToBeReady();
     try {
-      String fakeUser = fakeRemoteUsername;
-      StorageContainerManager mockScm = Mockito.spy(
-          cluster.getStorageContainerManager());
-      Mockito.when(mockScm.getPpcRemoteUsername())
-          .thenReturn(fakeUser);
+
+      SCMClientProtocolServer mockClientServer = Mockito.spy(
+          cluster.getStorageContainerManager().getClientProtocolServer());
+      Mockito.when(mockClientServer.getRpcRemoteUsername())
+          .thenReturn(fakeRemoteUsername);
 
       try {
-        mockScm.getClientProtocolServer().deleteContainer(
+        mockClientServer.deleteContainer(
             ContainerTestHelper.getTestContainerID());
         fail("Operation should fail, expecting an IOException here.");
       } catch (Exception e) {
         if (expectPermissionDenied) {
-          verifyPermissionDeniedException(e, fakeUser);
+          verifyPermissionDeniedException(e, fakeRemoteUsername);
         } else {
           // If passes permission check, it should fail with
           // container not exist exception.
@@ -130,7 +131,7 @@ public class TestStorageContainerManager {
       }
 
       try {
-        ContainerInfo container2 = mockScm.getClientProtocolServer()
+        ContainerInfo container2 = mockClientServer
             .allocateContainer(xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE,  "OZONE");
         if (expectPermissionDenied) {
@@ -139,11 +140,11 @@ public class TestStorageContainerManager {
           Assert.assertEquals(1, container2.getPipeline().getMachines().size());
         }
       } catch (Exception e) {
-        verifyPermissionDeniedException(e, fakeUser);
+        verifyPermissionDeniedException(e, fakeRemoteUsername);
       }
 
       try {
-        ContainerInfo container3 = mockScm.getClientProtocolServer()
+        ContainerInfo container3 = mockClientServer
             .allocateContainer(xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, "OZONE");
         if (expectPermissionDenied) {
@@ -152,16 +153,16 @@ public class TestStorageContainerManager {
           Assert.assertEquals(1, container3.getPipeline().getMachines().size());
         }
       } catch (Exception e) {
-        verifyPermissionDeniedException(e, fakeUser);
+        verifyPermissionDeniedException(e, fakeRemoteUsername);
       }
 
       try {
-        mockScm.getClientProtocolServer().getContainer(
+        mockClientServer.getContainer(
             ContainerTestHelper.getTestContainerID());
         fail("Operation should fail, expecting an IOException here.");
       } catch (Exception e) {
         if (expectPermissionDenied) {
-          verifyPermissionDeniedException(e, fakeUser);
+          verifyPermissionDeniedException(e, fakeRemoteUsername);
         } else {
           // If passes permission check, it should fail with
           // key not exist exception.

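The moved spy target is the actual fix here: spying the StorageContainerManager never affected the permission check, because the SCMClientProtocolServer it hands back holds a reference to the original, un-stubbed SCM, so the old getPpcRemoteUsername() stub was never on the call path. Spying the protocol server and invoking the RPC methods on the spy itself puts the stubbed getRpcRemoteUsername() in play. A self-contained sketch of the pattern follows; Server is a hypothetical stand-in rather than a Hadoop class, and doReturn is used because it stubs a spy without invoking the real method:

import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

import java.io.IOException;

public class SpyOnCallPathDemo {
  // Hypothetical stand-in for SCMClientProtocolServer.
  static class Server {
    String getRpcRemoteUsername() { return "real-user"; }

    void deleteContainer(long id) throws IOException {
      // This internal call dispatches through the spy when `this` is the spy.
      String user = getRpcRemoteUsername();
      if (!"admin".equals(user)) {
        throw new IOException("Access denied for user " + user);
      }
    }
  }

  public static void main(String[] args) {
    Server server = spy(new Server());
    doReturn("fake-user").when(server).getRpcRemoteUsername();
    try {
      server.deleteContainer(1L);          // call on the spy, not the original
    } catch (IOException e) {
      System.out.println(e.getMessage());  // Access denied for user fake-user
    }
  }
}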



[50/50] [abbrv] hadoop git commit: HDDS-19. Update ozone to latest ratis snapshot build (0.1.1-alpha-d7d7061-SNAPSHOT). Contributed by Lokesh Jain.

Posted by xy...@apache.org.
HDDS-19. Update ozone to latest ratis snapshot build (0.1.1-alpha-d7d7061-SNAPSHOT). Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8d7a99b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8d7a99b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8d7a99b

Branch: refs/heads/HDDS-4
Commit: e8d7a99b88dbe73fcd211d75e742d7949f4cf7dd
Parents: 307710d
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Mon May 14 22:25:03 2018 +0530
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:10 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   | 13 ++++++++
 .../apache/hadoop/ozone/OzoneConfigKeys.java    | 11 +++++++
 .../common/src/main/resources/ozone-default.xml | 12 +++++++
 .../server/ratis/XceiverServerRatis.java        | 33 ++++++++++++++++++--
 hadoop-project/pom.xml                          | 10 ++----
 5 files changed, 69 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8d7a99b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 92fa3d7..83a431e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -19,6 +19,9 @@ package org.apache.hadoop.hdds.scm;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.ratis.util.TimeDuration;
+
+import java.util.concurrent.TimeUnit;
 
 /**
  * This class contains constants for configuration keys used in SCM.
@@ -62,6 +65,16 @@ public final class ScmConfigKeys {
       "dfs.container.ratis.segment.preallocated.size";
   public static final int
       DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = 128 * 1024 * 1024;
+  public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
+      "dfs.ratis.client.request.timeout.duration";
+  public static final TimeDuration
+      DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT =
+      TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
+  public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
+      "dfs.ratis.server.request.timeout.duration";
+  public static final TimeDuration
+      DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
+      TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
 
   // TODO : this is copied from OzoneConsts, may need to move to a better place
   public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8d7a99b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 8d99350..affe298 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.ratis.util.TimeDuration;
 
 /**
  * This class contains constants for configuration keys used in Ozone.
@@ -226,6 +227,16 @@ public final class OzoneConfigKeys {
       = ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE;
   public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
       "dfs.container.ratis.datanode.storage.dir";
+  public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
+      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY;
+  public static final TimeDuration
+      DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT =
+      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT;
+  public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
+      ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY;
+  public static final TimeDuration
+      DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
+      ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT;
 
   public static final String OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
       "ozone.web.authentication.kerberos.principal";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8d7a99b/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 6998a85..deb286d 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -120,6 +120,18 @@
     </description>
   </property>
   <property>
+    <name>dfs.ratis.client.request.timeout.duration</name>
+    <value>3s</value>
+    <tag>OZONE, RATIS, MANAGEMENT</tag>
+    <description>The timeout duration for ratis client request.</description>
+  </property>
+  <property>
+    <name>dfs.ratis.server.request.timeout.duration</name>
+    <value>3s</value>
+    <tag>OZONE, RATIS, MANAGEMENT</tag>
+    <description>The timeout duration for ratis server request.</description>
+  </property>
+  <property>
     <name>ozone.container.report.interval</name>
     <value>60000ms</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8d7a99b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 4bd55f1..46def09 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.container.common.transport.server
     .XceiverServerSpi;
 import org.apache.ratis.RaftConfigKeys;
 import org.apache.ratis.RatisHelper;
+import org.apache.ratis.client.RaftClientConfigKeys;
 import org.apache.ratis.conf.RaftProperties;
 import org.apache.ratis.grpc.GrpcConfigKeys;
 import org.apache.ratis.netty.NettyConfigKeys;
@@ -78,11 +79,31 @@ public final class XceiverServerRatis implements XceiverServerSpi {
     final int numWriteChunkThreads = conf.getInt(
         OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY,
         OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT);
+    TimeUnit timeUnit =
+        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
+            .getUnit();
+    long duration = conf.getTimeDuration(
+        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY,
+        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
+            .getDuration(), timeUnit);
+    final TimeDuration clientRequestTimeout =
+        TimeDuration.valueOf(duration, timeUnit);
+    timeUnit = OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT
+        .getUnit();
+    duration = conf.getTimeDuration(
+        OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY,
+        OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT
+            .getDuration(), timeUnit);
+    final TimeDuration serverRequestTimeout =
+        TimeDuration.valueOf(duration, timeUnit);
 
     Objects.requireNonNull(dd, "id == null");
     this.port = port;
-    RaftProperties serverProperties = newRaftProperties(rpc, port,
-        storageDir, maxChunkSize, raftSegmentSize, raftSegmentPreallocatedSize);
+    RaftProperties serverProperties =
+        newRaftProperties(rpc, port, storageDir, maxChunkSize, raftSegmentSize,
+            raftSegmentPreallocatedSize);
+    setRequestTimeout(serverProperties, clientRequestTimeout,
+        serverRequestTimeout);
 
     writeChunkExecutor =
         new ThreadPoolExecutor(numWriteChunkThreads, numWriteChunkThreads,
@@ -99,6 +120,14 @@ public final class XceiverServerRatis implements XceiverServerSpi {
         .build();
   }
 
+  private static void setRequestTimeout(RaftProperties serverProperties,
+      TimeDuration clientRequestTimeout, TimeDuration serverRequestTimeout) {
+    RaftClientConfigKeys.Rpc
+        .setRequestTimeout(serverProperties, clientRequestTimeout);
+    RaftServerConfigKeys.Rpc
+        .setRequestTimeout(serverProperties, serverRequestTimeout);
+  }
+
   private static RaftProperties newRaftProperties(
       RpcType rpc, int port, String storageDir, int scmChunkSize,
       int raftSegmentSize, int raftSegmentPreallocatedSize) {

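Usage note: the new keys take Hadoop time-duration strings (the ozone-default.xml entries above use "3s"), and the code path shown reads them back with Configuration#getTimeDuration against the 3000 ms TimeDuration defaults. A minimal sketch of that round trip, using a plain Configuration for brevity:

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;

public class RatisTimeoutConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Equivalent to setting <value>5s</value> in ozone-site.xml.
    conf.set("dfs.ratis.client.request.timeout.duration", "5s");

    long millis = conf.getTimeDuration(
        "dfs.ratis.client.request.timeout.duration",
        3000, TimeUnit.MILLISECONDS);  // default mirrors ScmConfigKeys
    System.out.println(millis);        // 5000
  }
}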
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8d7a99b/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 862a693..bcb816e 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -97,7 +97,7 @@
     <ldap-api.version>1.0.0-M33</ldap-api.version>
 
     <!-- Apache Ratis version -->
-    <ratis.version>0.1.1-alpha-8fd74ed-SNAPSHOT</ratis.version>
+    <ratis.version>0.1.1-alpha-d7d7061-SNAPSHOT</ratis.version>
     <jcache.version>1.0-alpha-1</jcache.version>
     <ehcache.version>3.3.1</ehcache.version>
     <hikari.version>2.4.12</hikari.version>
@@ -106,7 +106,7 @@
 
     <!-- Maven protoc compiler -->
     <protobuf-maven-plugin.version>0.5.1</protobuf-maven-plugin.version>
-    <protobuf-compile.version>3.1.0</protobuf-compile.version>
+    <protobuf-compile.version>3.5.0</protobuf-compile.version>
     <os-maven-plugin.version>1.5.0.Final</os-maven-plugin.version>
 
     <!-- define the Java language version used by the compiler -->
@@ -881,12 +881,6 @@
       </dependency>
 
       <dependency>
-        <groupId>org.jctools</groupId>
-        <artifactId>jctools-core</artifactId>
-        <version>1.2.1</version>
-      </dependency>
-
-      <dependency>
         <groupId>org.apache.ratis</groupId>
         <artifactId>ratis-proto-shaded</artifactId>
         <version>${ratis.version}</version>




[13/50] [abbrv] hadoop git commit: HDDS-16. Remove Pipeline from Datanode Container Protocol protobuf definition. Contributed by Mukul Kumar Singh.

Posted by xy...@apache.org.
HDDS-16. Remove Pipeline from Datanode Container Protocol protobuf definition. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78d9241d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78d9241d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78d9241d

Branch: refs/heads/HDDS-4
Commit: 78d9241dad8ba48da07bc443ffd31e39eba98e0c
Parents: e229dcb
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Thu May 10 14:49:58 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:08 2018 -0700

----------------------------------------------------------------------
 .../hdds/scm/storage/ChunkOutputStream.java     |  4 +-
 .../org/apache/hadoop/hdds/client/BlockID.java  | 12 ++++
 .../scm/storage/ContainerProtocolCalls.java     | 12 ++--
 .../container/common/helpers/ChunkInfo.java     |  5 +-
 .../ozone/container/common/helpers/KeyData.java |  7 +--
 .../main/proto/DatanodeContainerProtocol.proto  | 63 +++++++++++++-------
 .../container/common/helpers/ContainerData.java | 19 +++---
 .../ozone/container/common/impl/Dispatcher.java |  5 --
 .../scm/cli/container/InfoContainerHandler.java |  5 +-
 .../client/io/OzoneContainerTranslation.java    |  2 +-
 .../ozone/container/ContainerTestHelper.java    | 10 ++--
 .../genesis/BenchMarkDatanodeDispatcher.java    |  9 +--
 12 files changed, 85 insertions(+), 68 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d9241d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index 325f110..8fce00d 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.hdds.client.BlockID;
 
 import java.io.IOException;
@@ -85,7 +85,7 @@ public class ChunkOutputStream extends OutputStream {
     KeyValue keyValue = KeyValue.newBuilder()
         .setKey("TYPE").setValue("KEY").build();
     this.containerKeyData = KeyData.newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .addMetadata(keyValue);
     this.xceiverClientManager = xceiverClientManager;
     this.xceiverClient = xceiverClient;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d9241d/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
index 7236af7..355a36d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
@@ -17,6 +17,7 @@
 package org.apache.hadoop.hdds.client;
 
 import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 
 /**
@@ -56,4 +57,15 @@ public class BlockID {
     return new BlockID(blockID.getContainerID(),
         blockID.getLocalID());
   }
+
+  public ContainerProtos.DatanodeBlockID getDatanodeBlockIDProtobuf() {
+    return ContainerProtos.DatanodeBlockID.newBuilder().
+        setContainerID(containerID).setLocalID(localID).build();
+  }
+
+  public static BlockID getFromProtobuf(ContainerProtos.DatanodeBlockID blockID) {
+    return new BlockID(blockID.getContainerID(),
+        blockID.getLocalID());
+  }
+
 }

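The two helpers above give a clean round trip between the client-side BlockID and the new datanode-local proto. A quick sketch, assuming the two-argument constructor used above and the usual getContainerID()/getLocalID() accessors on BlockID:

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;

public class DatanodeBlockIdRoundTrip {
  public static void main(String[] args) {
    BlockID id = new BlockID(1L, 42L);

    // Both proto fields are required, so the pair always survives.
    ContainerProtos.DatanodeBlockID proto = id.getDatanodeBlockIDProtobuf();
    BlockID back = BlockID.getFromProtobuf(proto);

    System.out.println(back.getContainerID() + "/" + back.getLocalID()); // 1/42
  }
}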
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d9241d/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 970e932..ca388d9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -50,7 +50,7 @@ import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Type;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
     .WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.hdds.client.BlockID;
 
 import java.io.IOException;
@@ -133,7 +133,7 @@ public final class ContainerProtocolCalls  {
         ChunkInfo chunk, BlockID blockID, String traceID) throws IOException {
     ReadChunkRequestProto.Builder readChunkRequest = ReadChunkRequestProto
         .newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .setChunkData(chunk);
     String id = xceiverClient.getPipeline().getLeader().getUuidString();
     ContainerCommandRequestProto request = ContainerCommandRequestProto
@@ -163,7 +163,7 @@ public final class ContainerProtocolCalls  {
       throws IOException {
     WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
         .newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .setChunkData(chunk)
         .setData(data);
     String id = xceiverClient.getPipeline().getLeader().getUuidString();
@@ -195,7 +195,7 @@ public final class ContainerProtocolCalls  {
       throws IOException {
 
     KeyData containerKeyData =
-        KeyData.newBuilder().setBlockID(blockID.getProtobuf())
+        KeyData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf())
             .build();
     PutKeyRequestProto.Builder createKeyRequest =
         PutKeyRequestProto.newBuilder()
@@ -241,7 +241,6 @@ public final class ContainerProtocolCalls  {
     ContainerProtos.ContainerData.Builder containerData = ContainerProtos
         .ContainerData.newBuilder();
     containerData.setContainerID(containerID);
-    createRequest.setPipeline(client.getPipeline().getProtobufMessage());
     createRequest.setContainerData(containerData.build());
 
     String id = client.getPipeline().getLeader().getUuidString();
@@ -321,7 +320,6 @@ public final class ContainerProtocolCalls  {
     ReadContainerRequestProto.Builder readRequest =
         ReadContainerRequestProto.newBuilder();
     readRequest.setContainerID(containerID);
-    readRequest.setPipeline(client.getPipeline().getProtobufMessage());
     String id = client.getPipeline().getLeader().getUuidString();
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
@@ -348,7 +346,7 @@ public final class ContainerProtocolCalls  {
       BlockID blockID, String traceID) throws IOException {
     KeyData containerKeyData = KeyData
         .newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .build();
 
     GetKeyRequestProto.Builder getKey = GetKeyRequestProto

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d9241d/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
index aa1fe74..7cf95a9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.container.common.helpers;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 
 import java.io.IOException;
 import java.util.Map;
@@ -111,8 +110,8 @@ public class ChunkInfo {
     }
 
     for (Map.Entry<String, String> entry : metadata.entrySet()) {
-      HddsProtos.KeyValue.Builder keyValBuilder =
-          HddsProtos.KeyValue.newBuilder();
+      ContainerProtos.KeyValue.Builder keyValBuilder =
+          ContainerProtos.KeyValue.newBuilder();
       builder.addMetadata(keyValBuilder.setKey(entry.getKey())
           .setValue(entry.getValue()).build());
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d9241d/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
index c3de5ed..c485c7f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.container.common.helpers;
 
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.client.BlockID;
 
 import java.io.IOException;
@@ -76,11 +75,11 @@ public class KeyData {
   public ContainerProtos.KeyData getProtoBufMessage() {
     ContainerProtos.KeyData.Builder builder =
         ContainerProtos.KeyData.newBuilder();
-    builder.setBlockID(this.blockID.getProtobuf());
+    builder.setBlockID(this.blockID.getDatanodeBlockIDProtobuf());
     builder.addAllChunks(this.chunks);
     for (Map.Entry<String, String> entry : metadata.entrySet()) {
-      HddsProtos.KeyValue.Builder keyValBuilder =
-          HddsProtos.KeyValue.newBuilder();
+      ContainerProtos.KeyValue.Builder keyValBuilder =
+          ContainerProtos.KeyValue.newBuilder();
       builder.addMetadata(keyValBuilder.setKey(entry.getKey())
           .setValue(entry.getValue()).build());
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d9241d/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index e7494ee..172b660 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -27,9 +27,7 @@
 option java_package = "org.apache.hadoop.hdds.protocol.proto";
 option java_outer_classname = "ContainerProtos";
 option java_generate_equals_and_hash = true;
-package hadoop.hdds;
-import "hdfs.proto";
-import "hdds.proto";
+package hadoop.hdds.datanode;
 
 /**
  * Commands that are used to manipulate the state of containers on a datanode.
@@ -134,6 +132,28 @@ enum Result {
   CLOSED_CONTAINER_RETRY = 27;
 }
 
+/**
+ * Block ID that uniquely identify a block in Datanode.
+ */
+message DatanodeBlockID {
+  required int64 containerID = 1;
+  required int64 localID = 2;
+}
+
+message KeyValue {
+  required string key = 1;
+  optional string value = 2;
+}
+
+/**
+ * Lifecycle states of a container in Datanode.
+ */
+enum ContainerLifeCycleState {
+    OPEN = 1;
+    CLOSING = 2;
+    CLOSED = 3;
+}
+
 message ContainerCommandRequestProto {
   required Type cmdType = 1; // Type of the command
 
@@ -205,7 +225,7 @@ message ContainerData {
   optional int64 bytesUsed = 6;
   optional int64 size = 7;
   optional int64 keyCount = 8;
-  optional LifeCycleState state = 9 [default = OPEN];
+  optional ContainerLifeCycleState state = 9 [default = OPEN];
 }
 
 message ContainerMeta {
@@ -215,26 +235,23 @@ message ContainerMeta {
 
 // Container Messages.
 message  CreateContainerRequestProto {
-  required Pipeline pipeline = 1;
-  required ContainerData containerData = 2;
+  required ContainerData containerData = 1;
 }
 
 message  CreateContainerResponseProto {
 }
 
 message  ReadContainerRequestProto {
-  required Pipeline pipeline = 1;
-  required int64 containerID = 2;
+  required int64 containerID = 1;
 }
 
 message  ReadContainerResponseProto {
-  optional ContainerData containerData = 2;
+  optional ContainerData containerData = 1;
 }
 
 message  UpdateContainerRequestProto {
-  required Pipeline pipeline = 1;
-  required ContainerData containerData = 2;
-  optional bool forceUpdate = 3 [default = false];
+  required ContainerData containerData = 1;
+  optional bool forceUpdate = 2 [default = false];
 }
 
 message  UpdateContainerResponseProto {
@@ -262,12 +279,12 @@ message CloseContainerRequestProto {
 }
 
 message CloseContainerResponseProto {
-  optional string hash = 2;
-  optional int64 containerID = 3;
+  optional string hash = 1;
+  optional int64 containerID = 2;
 }
 
 message KeyData {
-  required BlockID blockID = 1;
+  required DatanodeBlockID blockID = 1;
   optional int64 flags = 2; // for future use.
   repeated KeyValue metadata = 3;
   repeated ChunkInfo chunks = 4;
@@ -291,7 +308,7 @@ message  GetKeyResponseProto  {
 
 
 message  DeleteKeyRequestProto {
-  required BlockID blockID = 1;
+  required DatanodeBlockID blockID = 1;
 }
 
 message   DeleteKeyResponseProto {
@@ -300,7 +317,7 @@ message   DeleteKeyResponseProto {
 message  ListKeyRequestProto {
   required int64 containerID = 1;
   optional int64 startLocalID = 2;
-  required uint32 count = 4;
+  required uint32 count = 3;
 
 }
 
@@ -325,7 +342,7 @@ enum Stage {
 }
 
 message  WriteChunkRequestProto  {
-  required BlockID blockID = 1;
+  required DatanodeBlockID blockID = 1;
   required ChunkInfo chunkData = 2;
   optional bytes data = 3;
   optional Stage stage = 4 [default = COMBINED];
@@ -335,26 +352,26 @@ message  WriteChunkResponseProto {
 }
 
 message  ReadChunkRequestProto  {
-  required BlockID blockID = 1;
+  required DatanodeBlockID blockID = 1;
   required ChunkInfo chunkData = 2;
 }
 
 message  ReadChunkResponseProto {
-  required BlockID blockID = 1;
+  required DatanodeBlockID blockID = 1;
   required ChunkInfo chunkData = 2;
   required bytes data = 3;
 }
 
 message  DeleteChunkRequestProto {
-  required BlockID blockID = 1;
-  required ChunkInfo chunkData = 3;
+  required DatanodeBlockID blockID = 1;
+  required ChunkInfo chunkData = 2;
 }
 
 message  DeleteChunkResponseProto {
 }
 
 message  ListChunkRequestProto {
-  required BlockID blockID = 1;
+  required DatanodeBlockID blockID = 1;
   required string prevChunkName = 2;
   required uint32 count = 3;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d9241d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index c20282a..799cca3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -22,7 +22,8 @@ import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ContainerLifeCycleState;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.util.Time;
 
@@ -48,7 +49,7 @@ public class ContainerData {
   private AtomicLong bytesUsed;
   private long maxSize;
   private long containerID;
-  private HddsProtos.LifeCycleState state;
+  private ContainerLifeCycleState state;
 
   /**
    * Constructs a  ContainerData Object.
@@ -63,7 +64,7 @@ public class ContainerData {
         ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT) * OzoneConsts.GB;
     this.bytesUsed =  new AtomicLong(0L);
     this.containerID = containerID;
-    this.state = HddsProtos.LifeCycleState.OPEN;
+    this.state = ContainerLifeCycleState.OPEN;
   }
 
   /**
@@ -133,8 +134,8 @@ public class ContainerData {
     builder.setState(this.getState());
 
     for (Map.Entry<String, String> entry : metadata.entrySet()) {
-      HddsProtos.KeyValue.Builder keyValBuilder =
-          HddsProtos.KeyValue.newBuilder();
+      ContainerProtos.KeyValue.Builder keyValBuilder =
+          ContainerProtos.KeyValue.newBuilder();
       builder.addMetadata(keyValBuilder.setKey(entry.getKey())
           .setValue(entry.getValue()).build());
     }
@@ -250,11 +251,11 @@ public class ContainerData {
     return containerID;
   }
 
-  public synchronized  void setState(HddsProtos.LifeCycleState state) {
+  public synchronized void setState(ContainerLifeCycleState state) {
     this.state = state;
   }
 
-  public synchronized HddsProtos.LifeCycleState getState() {
+  public synchronized ContainerLifeCycleState getState() {
     return this.state;
   }
 
@@ -263,7 +264,7 @@ public class ContainerData {
    * @return - boolean
    */
   public synchronized  boolean isOpen() {
-    return HddsProtos.LifeCycleState.OPEN == state;
+    return ContainerLifeCycleState.OPEN == state;
   }
 
   /**
@@ -271,7 +272,7 @@ public class ContainerData {
    */
   public synchronized void closeContainer() {
     // TODO: closed or closing here
-    setState(HddsProtos.LifeCycleState.CLOSED);
+    setState(ContainerLifeCycleState.CLOSED);
 
     // Something brain-dead for now: name + timestamp of when we get the
     // close container message.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d9241d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
index 46bd842..8d1b17c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.container.common.impl;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.ByteString;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
@@ -393,10 +392,6 @@ public class Dispatcher implements ContainerDispatcher {
         msg.getCreateContainer().getContainerData(), conf);
     Preconditions.checkNotNull(cData, "Container data is null");
 
-    Pipeline pipeline = Pipeline.getFromProtoBuf(
-        msg.getCreateContainer().getPipeline());
-    Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
-
     this.containerManager.createContainer(cData);
     return ContainerUtils.getContainerResponse(msg);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d9241d/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
index 36d46c0..843d9db 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
@@ -27,7 +27,8 @@ import org.apache.hadoop.hdds.scm.client.ScmClient;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ContainerLifeCycleState;
 
 import java.io.IOException;
 import java.util.stream.Collectors;
@@ -77,7 +78,7 @@ public class InfoContainerHandler extends OzoneCommandHandler {
     // Print container report info.
     logOut("Container id: %s", containerID);
     String openStatus =
-        containerData.getState() == HddsProtos.LifeCycleState.OPEN ? "OPEN" :
+        containerData.getState() == ContainerLifeCycleState.OPEN ? "OPEN" :
             "CLOSED";
     logOut("Container State: %s", openStatus);
     if (!containerData.getHash().isEmpty()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d9241d/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
index 2132bc8..e7215ef 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
@@ -38,7 +38,7 @@ final class OzoneContainerTranslation {
   public static KeyData containerKeyDataForRead(BlockID blockID) {
     return KeyData
         .newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .build();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d9241d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index bcd08d7..fed725c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -28,11 +28,10 @@ import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
     .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -204,7 +203,7 @@ public final class ContainerTestHelper {
 
     Pipeline newPipeline =
         new Pipeline(pipeline.getPipelineChannel());
-    writeRequest.setBlockID(blockID.getProtobuf());
+    writeRequest.setBlockID(blockID.getDatanodeBlockIDProtobuf());
 
     byte[] data = getData(datalen);
     ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
@@ -361,7 +360,6 @@ public final class ContainerTestHelper {
         .ContainerData.newBuilder();
     containerData.setContainerID(containerID);
     createRequest.setContainerData(containerData.build());
-    createRequest.setPipeline(pipeline.getProtobufMessage());
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
@@ -399,7 +397,6 @@ public final class ContainerTestHelper {
     }
     Pipeline pipeline =
         ContainerTestHelper.createSingleNodePipeline();
-    updateRequestBuilder.setPipeline(pipeline.getProtobufMessage());
     updateRequestBuilder.setContainerData(containerData.build());
 
     ContainerCommandRequestProto.Builder request =
@@ -469,7 +466,8 @@ public final class ContainerTestHelper {
    */
   public static ContainerCommandRequestProto getKeyRequest(
       Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKeyRequest) {
-    HddsProtos.BlockID blockID = putKeyRequest.getKeyData().getBlockID();
+    ContainerProtos.DatanodeBlockID blockID =
+        putKeyRequest.getKeyData().getBlockID();
     LOG.trace("getKey: blockID={}", blockID);
 
     ContainerProtos.GetKeyRequestProto.Builder getRequest =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d9241d/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
index b73f108..2da6874 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.ozone.container.common.impl.Dispatcher;
 import org.apache.hadoop.ozone.container.common.impl.KeyManagerImpl;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel;
 import org.apache.hadoop.util.Time;
 import org.openjdk.jmh.annotations.Benchmark;
@@ -168,8 +167,6 @@ public class BenchMarkDatanodeDispatcher {
   private ContainerCommandRequestProto getCreateContainerCommand(long containerID) {
     CreateContainerRequestProto.Builder createRequest =
         CreateContainerRequestProto.newBuilder();
-    createRequest.setPipeline(
-        new Pipeline(pipelineChannel).getProtobufMessage());
     createRequest.setContainerData(
         ContainerData.newBuilder().setContainerID(
             containerID).build());
@@ -187,7 +184,7 @@ public class BenchMarkDatanodeDispatcher {
       BlockID blockID, String chunkName) {
     WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
         .newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .setChunkData(getChunkInfo(blockID, chunkName))
         .setData(data);
 
@@ -204,7 +201,7 @@ public class BenchMarkDatanodeDispatcher {
       BlockID blockID, String chunkName) {
     ReadChunkRequestProto.Builder readChunkRequest = ReadChunkRequestProto
         .newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .setChunkData(getChunkInfo(blockID, chunkName));
     ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
         .newBuilder();
@@ -258,7 +255,7 @@ public class BenchMarkDatanodeDispatcher {
       BlockID blockID, String chunkKey) {
     ContainerProtos.KeyData.Builder builder =  ContainerProtos.KeyData
         .newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .addChunks(getChunkInfo(blockID, chunkKey));
     return builder.build();
   }




[03/50] [abbrv] hadoop git commit: YARN-7715. Support NM promotion/demotion of running containers. (Miklos Szegedi via Haibo Chen)

Posted by xy...@apache.org.
YARN-7715. Support NM promotion/demotion of running containers. (Miklos Szegedi via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3183b35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3183b35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3183b35

Branch: refs/heads/HDDS-4
Commit: d3183b352fb19d03482a00986cc04fad57cd842d
Parents: ee7daf0
Author: Haibo Chen <ha...@apache.org>
Authored: Thu May 10 11:01:01 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:08 2018 -0700

----------------------------------------------------------------------
 .../CGroupsBlkioResourceHandlerImpl.java        |  6 ++
 .../CGroupsCpuResourceHandlerImpl.java          | 75 ++++++++++++--------
 .../CGroupsMemoryResourceHandlerImpl.java       | 73 +++++++++++--------
 .../NetworkPacketTaggingHandlerImpl.java        |  6 ++
 .../linux/resources/ResourceHandler.java        | 16 ++++-
 .../linux/resources/ResourceHandlerChain.java   | 18 +++++
 .../TrafficControlBandwidthHandlerImpl.java     |  6 ++
 .../resources/fpga/FpgaResourceHandlerImpl.java |  6 ++
 .../resources/gpu/GpuResourceHandlerImpl.java   |  6 ++
 .../resources/numa/NumaResourceHandlerImpl.java |  6 ++
 .../scheduler/ContainerScheduler.java           | 30 ++++++++
 .../TestCGroupsCpuResourceHandlerImpl.java      |  1 +
 .../TestCGroupsMemoryResourceHandlerImpl.java   |  1 +
 .../TestResourcePluginManager.java              |  6 ++
 .../TestContainerSchedulerQueuing.java          | 10 +++
 15 files changed, 202 insertions(+), 64 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsBlkioResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsBlkioResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsBlkioResourceHandlerImpl.java
index 42fc634..2c402c0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsBlkioResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsBlkioResourceHandlerImpl.java
@@ -156,6 +156,12 @@ public class CGroupsBlkioResourceHandlerImpl implements DiskResourceHandler {
   }
 
   @Override
+  public List<PrivilegedOperation> updateContainer(Container container)
+      throws ResourceHandlerException {
+    return null;
+  }
+
+  @Override
   public List<PrivilegedOperation> postComplete(ContainerId containerId)
       throws ResourceHandlerException {
     cGroupsHandler.deleteCGroup(CGroupsHandler.CGroupController.BLKIO,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java
index 7ea7be2..37221f4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java
@@ -186,38 +186,8 @@ public class CGroupsCpuResourceHandlerImpl implements CpuResourceHandler {
   public List<PrivilegedOperation> preStart(Container container)
       throws ResourceHandlerException {
     String cgroupId = container.getContainerId().toString();
-    Resource containerResource = container.getResource();
     cGroupsHandler.createCGroup(CPU, cgroupId);
-    try {
-      int containerVCores = containerResource.getVirtualCores();
-      ContainerTokenIdentifier id = container.getContainerTokenIdentifier();
-      if (id != null && id.getExecutionType() ==
-          ExecutionType.OPPORTUNISTIC) {
-        cGroupsHandler
-            .updateCGroupParam(CPU, cgroupId, CGroupsHandler.CGROUP_CPU_SHARES,
-                String.valueOf(CPU_DEFAULT_WEIGHT_OPPORTUNISTIC));
-      } else {
-        int cpuShares = CPU_DEFAULT_WEIGHT * containerVCores;
-        cGroupsHandler
-            .updateCGroupParam(CPU, cgroupId, CGroupsHandler.CGROUP_CPU_SHARES,
-                String.valueOf(cpuShares));
-      }
-      if (strictResourceUsageMode) {
-        if (nodeVCores != containerVCores) {
-          float containerCPU =
-              (containerVCores * yarnProcessors) / (float) nodeVCores;
-          int[] limits = getOverallLimits(containerCPU);
-          cGroupsHandler.updateCGroupParam(CPU, cgroupId,
-              CGroupsHandler.CGROUP_CPU_PERIOD_US, String.valueOf(limits[0]));
-          cGroupsHandler.updateCGroupParam(CPU, cgroupId,
-              CGroupsHandler.CGROUP_CPU_QUOTA_US, String.valueOf(limits[1]));
-        }
-      }
-    } catch (ResourceHandlerException re) {
-      cGroupsHandler.deleteCGroup(CPU, cgroupId);
-      LOG.warn("Could not update cgroup for container", re);
-      throw re;
-    }
+    updateContainer(container);
     List<PrivilegedOperation> ret = new ArrayList<>();
     ret.add(new PrivilegedOperation(
         PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
@@ -233,6 +203,49 @@ public class CGroupsCpuResourceHandlerImpl implements CpuResourceHandler {
   }
 
   @Override
+  public List<PrivilegedOperation> updateContainer(Container container)
+      throws ResourceHandlerException {
+    Resource containerResource = container.getResource();
+    String cgroupId = container.getContainerId().toString();
+    File cgroup = new File(cGroupsHandler.getPathForCGroup(CPU, cgroupId));
+    if (cgroup.exists()) {
+      try {
+        int containerVCores = containerResource.getVirtualCores();
+        ContainerTokenIdentifier id = container.getContainerTokenIdentifier();
+        if (id != null && id.getExecutionType() ==
+            ExecutionType.OPPORTUNISTIC) {
+          cGroupsHandler
+              .updateCGroupParam(CPU, cgroupId,
+                  CGroupsHandler.CGROUP_CPU_SHARES,
+                  String.valueOf(CPU_DEFAULT_WEIGHT_OPPORTUNISTIC));
+        } else {
+          int cpuShares = CPU_DEFAULT_WEIGHT * containerVCores;
+          cGroupsHandler
+              .updateCGroupParam(CPU, cgroupId,
+                  CGroupsHandler.CGROUP_CPU_SHARES,
+                  String.valueOf(cpuShares));
+        }
+        if (strictResourceUsageMode) {
+          if (nodeVCores != containerVCores) {
+            float containerCPU =
+                (containerVCores * yarnProcessors) / (float) nodeVCores;
+            int[] limits = getOverallLimits(containerCPU);
+            cGroupsHandler.updateCGroupParam(CPU, cgroupId,
+                CGroupsHandler.CGROUP_CPU_PERIOD_US, String.valueOf(limits[0]));
+            cGroupsHandler.updateCGroupParam(CPU, cgroupId,
+                CGroupsHandler.CGROUP_CPU_QUOTA_US, String.valueOf(limits[1]));
+          }
+        }
+      } catch (ResourceHandlerException re) {
+        cGroupsHandler.deleteCGroup(CPU, cgroupId);
+        LOG.warn("Could not update cgroup for container", re);
+        throw re;
+      }
+    }
+    return null;
+  }
+
+  @Override
   public List<PrivilegedOperation> postComplete(ContainerId containerId)
       throws ResourceHandlerException {
     cGroupsHandler.deleteCGroup(CPU, containerId.toString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
index 558751f..2d1585e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
 
+import java.io.File;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -119,43 +120,53 @@ public class CGroupsMemoryResourceHandlerImpl implements MemoryResourceHandler {
   }
 
   @Override
-  public List<PrivilegedOperation> preStart(Container container)
+  public List<PrivilegedOperation> updateContainer(Container container)
       throws ResourceHandlerException {
-
     String cgroupId = container.getContainerId().toString();
-    //memory is in MB
-    long containerSoftLimit =
-        (long) (container.getResource().getMemorySize() * this.softLimit);
-    long containerHardLimit = container.getResource().getMemorySize();
-    cGroupsHandler.createCGroup(MEMORY, cgroupId);
-    if (enforce) {
-      try {
-        cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
-            CGroupsHandler.CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES,
-            String.valueOf(containerHardLimit) + "M");
-        ContainerTokenIdentifier id = container.getContainerTokenIdentifier();
-        if (id != null && id.getExecutionType() ==
-            ExecutionType.OPPORTUNISTIC) {
-          cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
-              CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES,
-              String.valueOf(OPPORTUNISTIC_SOFT_LIMIT) + "M");
+    File cgroup = new File(cGroupsHandler.getPathForCGroup(MEMORY, cgroupId));
+    if (cgroup.exists()) {
+      //memory is in MB
+      long containerSoftLimit =
+          (long) (container.getResource().getMemorySize() * this.softLimit);
+      long containerHardLimit = container.getResource().getMemorySize();
+      if (enforce) {
+        try {
           cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
-              CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS,
-              String.valueOf(OPPORTUNISTIC_SWAPPINESS));
-        } else {
-          cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
-              CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES,
-              String.valueOf(containerSoftLimit) + "M");
-          cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
-              CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS,
-              String.valueOf(swappiness));
+              CGroupsHandler.CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES,
+              String.valueOf(containerHardLimit) + "M");
+          ContainerTokenIdentifier id = container.getContainerTokenIdentifier();
+          if (id != null && id.getExecutionType() ==
+              ExecutionType.OPPORTUNISTIC) {
+            cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
+                CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES,
+                String.valueOf(OPPORTUNISTIC_SOFT_LIMIT) + "M");
+            cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
+                CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS,
+                String.valueOf(OPPORTUNISTIC_SWAPPINESS));
+          } else {
+            cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
+                CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES,
+                String.valueOf(containerSoftLimit) + "M");
+            cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
+                CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS,
+                String.valueOf(swappiness));
+          }
+        } catch (ResourceHandlerException re) {
+          cGroupsHandler.deleteCGroup(MEMORY, cgroupId);
+          LOG.warn("Could not update cgroup for container", re);
+          throw re;
         }
-      } catch (ResourceHandlerException re) {
-        cGroupsHandler.deleteCGroup(MEMORY, cgroupId);
-        LOG.warn("Could not update cgroup for container", re);
-        throw re;
       }
     }
+    return null;
+  }
+
+  @Override
+  public List<PrivilegedOperation> preStart(Container container)
+      throws ResourceHandlerException {
+    String cgroupId = container.getContainerId().toString();
+    cGroupsHandler.createCGroup(MEMORY, cgroupId);
+    updateContainer(container);
     List<PrivilegedOperation> ret = new ArrayList<>();
     ret.add(new PrivilegedOperation(
         PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java
index 1580e2c..3f6d4b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java
@@ -128,6 +128,12 @@ public class NetworkPacketTaggingHandlerImpl
     return null;
   }
 
+  @Override
+  public List<PrivilegedOperation> updateContainer(Container container)
+      throws ResourceHandlerException {
+    return null;
+  }
+
   /**
    * Cleanup operation once container is completed - deletes cgroup.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandler.java
index 3dfc86b..35c6460 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandler.java
@@ -61,7 +61,7 @@ public interface ResourceHandler {
   /**
    * Require state for container that was already launched
    *
-   * @param containerId if of the container being reacquired.
+   * @param containerId id of the container being reacquired.
    * @return (possibly empty) list of operations that require elevated
    * privileges
    * @throws ResourceHandlerException
@@ -71,7 +71,19 @@ public interface ResourceHandler {
       throws ResourceHandlerException;
 
   /**
-   * Perform any tasks necessary after container completion
+   * Update state for container that was already launched
+   *
+   * @param container the container being updated.
+   * @return (possibly empty) list of operations that require elevated
+   * privileges
+   * @throws ResourceHandlerException
+   */
+
+  List<PrivilegedOperation> updateContainer(Container container)
+      throws ResourceHandlerException;
+
+  /**
+   * Perform any tasks necessary after container completion.
    * @param containerId of the container that was completed.
    * @return (possibly empty) list of operations that require elevated
    * privileges

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerChain.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerChain.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerChain.java
index 72bf30c..2fc301a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerChain.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerChain.java
@@ -101,6 +101,24 @@ public class ResourceHandlerChain implements ResourceHandler {
   }
 
   @Override
+  public List<PrivilegedOperation> updateContainer(Container container)
+      throws ResourceHandlerException {
+    List<PrivilegedOperation> allOperations = new
+        ArrayList<PrivilegedOperation>();
+
+    for (ResourceHandler resourceHandler : resourceHandlers) {
+      List<PrivilegedOperation> handlerOperations =
+          resourceHandler.updateContainer(container);
+
+      if (handlerOperations != null) {
+        allOperations.addAll(handlerOperations);
+      }
+
+    }
+    return allOperations;
+  }
+
+  @Override
   public List<PrivilegedOperation> postComplete(ContainerId containerId)
       throws ResourceHandlerException {
     List<PrivilegedOperation> allOperations = new

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
index 126685f..c04e935 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
@@ -203,6 +203,12 @@ public class TrafficControlBandwidthHandlerImpl
     return null;
   }
 
+  @Override
+  public List<PrivilegedOperation> updateContainer(Container container)
+      throws ResourceHandlerException {
+    return null;
+  }
+
   /**
    * Returns total bytes sent per container to be used for metrics tracking
    * purposes.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java
index bf3d9b0..11f7114 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java
@@ -206,6 +206,12 @@ public class FpgaResourceHandlerImpl implements ResourceHandler {
   }
 
   @Override
+  public List<PrivilegedOperation> updateContainer(Container container)
+      throws ResourceHandlerException {
+    return null;
+  }
+
+  @Override
   public List<PrivilegedOperation> postComplete(ContainerId containerId) throws ResourceHandlerException {
     allocator.cleanupAssignFpgas(containerId.toString());
     cGroupsHandler.deleteCGroup(CGroupsHandler.CGroupController.DEVICES,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java
index 8ddc227..587fcb4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java
@@ -165,6 +165,12 @@ public class GpuResourceHandlerImpl implements ResourceHandler {
   }
 
   @Override
+  public List<PrivilegedOperation> updateContainer(Container container)
+      throws ResourceHandlerException {
+    return null;
+  }
+
+  @Override
   public synchronized List<PrivilegedOperation> postComplete(
       ContainerId containerId) throws ResourceHandlerException {
     gpuAllocator.cleanupAssignGpus(containerId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java
index 128daca..8ffba24 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java
@@ -95,6 +95,12 @@ public class NumaResourceHandlerImpl implements ResourceHandler {
   }
 
   @Override
+  public List<PrivilegedOperation> updateContainer(Container container)
+      throws ResourceHandlerException {
+    return null;
+  }
+
+  @Override
   public List<PrivilegedOperation> postComplete(ContainerId containerId)
       throws ResourceHandlerException {
     numaResourceAllocator.releaseNumaResource(containerId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
index 57368ab..5cdcf41 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
@@ -34,6 +34,9 @@ import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerChain;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor
     .ChangeMonitoringContainerResourceEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
@@ -105,6 +108,9 @@ public class ContainerScheduler extends AbstractService implements
 
   private Boolean usePauseEventForPreemption = false;
 
+  @VisibleForTesting
+  ResourceHandlerChain resourceHandlerChain = null;
+
   /**
    * Instantiate a Container Scheduler.
    * @param context NodeManager Context.
@@ -123,6 +129,24 @@ public class ContainerScheduler extends AbstractService implements
   @Override
   public void serviceInit(Configuration conf) throws Exception {
     super.serviceInit(conf);
+    try {
+      if (resourceHandlerChain == null) {
+        resourceHandlerChain = ResourceHandlerModule
+            .getConfiguredResourceHandlerChain(conf, context);
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Resource handler chain enabled = " + (resourceHandlerChain
+            != null));
+      }
+      if (resourceHandlerChain != null) {
+        LOG.debug("Bootstrapping resource handler chain");
+        resourceHandlerChain.bootstrap(conf);
+      }
+    } catch (ResourceHandlerException e) {
+      LOG.error("Failed to bootstrap configured resource subsystems! ", e);
+      throw new IOException(
+          "Failed to bootstrap configured resource subsystems!");
+    }
     this.usePauseEventForPreemption =
         conf.getBoolean(
             YarnConfiguration.NM_CONTAINER_QUEUING_USE_PAUSE_FOR_PREEMPTION,
@@ -218,6 +242,12 @@ public class ContainerScheduler extends AbstractService implements
               updateEvent.getContainer());
         }
       }
+      try {
+        resourceHandlerChain.updateContainer(updateEvent.getContainer());
+      } catch (Exception ex) {
+        LOG.warn(String.format("Could not update resources on " +
+            "container update of %s", containerId), ex);
+      }
       startPendingContainers(maxOppQueueLength <= 0);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsCpuResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsCpuResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsCpuResourceHandlerImpl.java
index 006b060..842fc6b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsCpuResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsCpuResourceHandlerImpl.java
@@ -49,6 +49,7 @@ public class TestCGroupsCpuResourceHandlerImpl {
   @Before
   public void setup() {
     mockCGroupsHandler = mock(CGroupsHandler.class);
+    when(mockCGroupsHandler.getPathForCGroup(any(), any())).thenReturn(".");
     cGroupsCpuResourceHandler =
         new CGroupsCpuResourceHandlerImpl(mockCGroupsHandler);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
index 78ccc61..416b4fd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
@@ -45,6 +45,7 @@ public class TestCGroupsMemoryResourceHandlerImpl {
   @Before
   public void setup() {
     mockCGroupsHandler = mock(CGroupsHandler.class);
+    when(mockCGroupsHandler.getPathForCGroup(any(), any())).thenReturn(".");
     cGroupsMemoryResourceHandler =
         new CGroupsMemoryResourceHandlerImpl(mockCGroupsHandler);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/TestResourcePluginManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/TestResourcePluginManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/TestResourcePluginManager.java
index bcadf76..6ed7c56 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/TestResourcePluginManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/TestResourcePluginManager.java
@@ -117,6 +117,12 @@ public class TestResourcePluginManager extends NodeManagerTestBase {
     }
 
     @Override
+    public List<PrivilegedOperation> updateContainer(Container container)
+        throws ResourceHandlerException {
+      return null;
+    }
+
+    @Override
     public List<PrivilegedOperation> postComplete(ContainerId containerId)
         throws ResourceHandlerException {
       return null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3183b35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
index 5c72e7e..1da7e4a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
@@ -64,6 +64,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerChain;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
@@ -72,7 +73,11 @@ import org.junit.Assert;
 import org.junit.Test;
 import org.slf4j.LoggerFactory;
 
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 
 /**
  * Tests to verify that the {@link ContainerScheduler} is able to queue and
@@ -1183,6 +1188,8 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
 
     ContainerScheduler containerScheduler =
         containerManager.getContainerScheduler();
+    containerScheduler.resourceHandlerChain =
+        mock(ResourceHandlerChain.class);
     // Ensure two containers are properly queued.
     Assert.assertEquals(1, containerScheduler.getNumQueuedContainers());
     Assert.assertEquals(0,
@@ -1246,6 +1253,9 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
         ContainerEventType.INIT_CONTAINER,
         ContainerEventType.UPDATE_CONTAINER_TOKEN,
         ContainerEventType.CONTAINER_LAUNCHED), containerEventTypes);
+    verify(containerScheduler.resourceHandlerChain,
+        times(1))
+        .updateContainer(any());
   }
 
   @Test
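
Taken together, the pieces above add an updateContainer() hook to every
ResourceHandler, aggregate it through ResourceHandlerChain, and have the
ContainerScheduler invoke the chain whenever a running container's resources
change (promotion/demotion). A condensed sketch of that flow, using only
names that appear in the diff — the wrapper class itself is illustrative:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.server.nodemanager.Context;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerChain;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule;

    final class ContainerUpdateFlowSketch {
      private final ResourceHandlerChain chain;

      // In the NM the chain is built and bootstrapped once, in serviceInit().
      ContainerUpdateFlowSketch(Configuration conf, Context context)
          throws ResourceHandlerException {
        chain = ResourceHandlerModule
            .getConfiguredResourceHandlerChain(conf, context);
        if (chain != null) {
          chain.bootstrap(conf);
        }
      }

      // Called per container-update event; handlers re-apply cgroup limits
      // derived from container.getResource(). Handlers with nothing to do
      // return null, and the chain aggregates the remaining privileged
      // operations.
      List<PrivilegedOperation> onContainerUpdate(Container container)
          throws ResourceHandlerException {
        return chain == null ? null : chain.updateContainer(container);
      }
    }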




[41/50] [abbrv] hadoop git commit: YARN-8243. Flex down should remove instance with largest component instance ID first. Contributed by Gour Saha

Posted by xy...@apache.org.
YARN-8243. Flex down should remove instance with largest component instance ID first. Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c74d1720
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c74d1720
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c74d1720

Branch: refs/heads/HDDS-4
Commit: c74d172032092b905b857abb7972cee19620af3a
Parents: ffe99d4
Author: Billie Rinaldi <bi...@apache.org>
Authored: Fri May 11 07:27:35 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/service/ServiceManager.java     |  5 +--
 .../hadoop/yarn/service/ServiceMaster.java      | 39 +++++++++----------
 .../yarn/service/component/Component.java       | 26 ++++++++-----
 .../component/instance/ComponentInstance.java   |  9 +----
 .../yarn/service/TestYarnNativeServices.java    | 40 +++++++++++++++++++-
 5 files changed, 75 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c74d1720/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java
index 869d7f3..e6a38dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java
@@ -237,12 +237,11 @@ public class ServiceManager implements EventHandler<ServiceEvent> {
    * ServiceMaster.checkAndUpdateServiceState here to make it easy to fix
    * this in future.
    */
-  public void checkAndUpdateServiceState(boolean isIncrement) {
+  public void checkAndUpdateServiceState() {
     writeLock.lock();
     try {
       if (!getState().equals(State.UPGRADING)) {
-        ServiceMaster.checkAndUpdateServiceState(this.scheduler,
-            isIncrement);
+        ServiceMaster.checkAndUpdateServiceState(this.scheduler);
       }
     } finally {
       writeLock.unlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c74d1720/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
index 0383a65..28881ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
@@ -264,30 +264,25 @@ public class ServiceMaster extends CompositeService {
   // This method should be called whenever there is an increment or decrement
   // of a READY state component of a service
   public static synchronized void checkAndUpdateServiceState(
-      ServiceScheduler scheduler, boolean isIncrement) {
+      ServiceScheduler scheduler) {
     ServiceState curState = scheduler.getApp().getState();
-    if (!isIncrement) {
-      // set it to STARTED every time a component moves out of STABLE state
-      scheduler.getApp().setState(ServiceState.STARTED);
-    } else {
-      // otherwise check the state of all components
-      boolean isStable = true;
-      for (org.apache.hadoop.yarn.service.api.records.Component comp : scheduler
-          .getApp().getComponents()) {
-        if (comp.getState() !=
-            org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE) {
-          isStable = false;
-          break;
-        }
+    // Check the state of all components
+    boolean isStable = true;
+    for (org.apache.hadoop.yarn.service.api.records.Component comp : scheduler
+        .getApp().getComponents()) {
+      if (comp.getState() !=
+          org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE) {
+        isStable = false;
+        break;
       }
-      if (isStable) {
-        scheduler.getApp().setState(ServiceState.STABLE);
-      } else {
-        // mark new state as started only if current state is stable, otherwise
-        // leave it as is
-        if (curState == ServiceState.STABLE) {
-          scheduler.getApp().setState(ServiceState.STARTED);
-        }
+    }
+    if (isStable) {
+      scheduler.getApp().setState(ServiceState.STABLE);
+    } else {
+      // mark new state as started only if current state is stable, otherwise
+      // leave it as is
+      if (curState == ServiceState.STABLE) {
+        scheduler.getApp().setState(ServiceState.STARTED);
       }
     }
     if (curState != scheduler.getApp().getState()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c74d1720/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index e115841..7979c19 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -323,7 +323,7 @@ public class Component implements EventHandler<ComponentEvent> {
             org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
         component.getScheduler().getApp().setState(ServiceState.STARTED);
         return FLEXING;
-      } else if (delta < 0){
+      } else if (delta < 0) {
         delta = 0 - delta;
         // scale down
         LOG.info("[FLEX DOWN COMPONENT " + component.getName()
@@ -343,7 +343,9 @@ public class Component implements EventHandler<ComponentEvent> {
           instance.destroy();
         }
         checkAndUpdateComponentState(component, false);
-        return STABLE;
+        return component.componentSpec.getState()
+            == org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE
+                ? STABLE : FLEXING;
       } else {
         LOG.info("[FLEX COMPONENT " + component.getName() + "]: already has " +
             event.getDesired() + " instances, ignoring");
@@ -440,7 +442,7 @@ public class Component implements EventHandler<ComponentEvent> {
               component.componentSpec.getState());
         }
         // component state change will trigger re-check of service state
-        component.context.getServiceManager().checkAndUpdateServiceState(true);
+        component.context.getServiceManager().checkAndUpdateServiceState();
       }
     } else {
       // container moving out of READY state could be because of FLEX down so
@@ -449,14 +451,18 @@ public class Component implements EventHandler<ComponentEvent> {
           .value() < component.componentMetrics.containersDesired.value()) {
         component.componentSpec.setState(
             org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
-        if (curState != component.componentSpec.getState()) {
-          LOG.info("[COMPONENT {}] state changed from {} -> {}",
-              component.componentSpec.getName(), curState,
-              component.componentSpec.getState());
-        }
-        // component state change will trigger re-check of service state
-        component.context.getServiceManager().checkAndUpdateServiceState(false);
+      } else if (component.componentMetrics.containersReady
+          .value() == component.componentMetrics.containersDesired.value()) {
+        component.componentSpec.setState(
+            org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
+      }
+      if (curState != component.componentSpec.getState()) {
+        LOG.info("[COMPONENT {}] state changed from {} -> {}",
+            component.componentSpec.getName(), curState,
+            component.componentSpec.getState());
       }
+      // component state change will trigger re-check of service state
+      component.context.getServiceManager().checkAndUpdateServiceState();
     }
     // when the service is stable then the state of component needs to
     // transition to stable

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c74d1720/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
index 9d0a56b..4aca0ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
@@ -581,14 +581,7 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
 
   @Override
   public int compareTo(ComponentInstance to) {
-    long delta = containerStartedTime - to.containerStartedTime;
-    if (delta == 0) {
-      return getCompInstanceId().compareTo(to.getCompInstanceId());
-    } else if (delta < 0) {
-      return -1;
-    } else {
-      return 1;
-    }
+    return getCompInstanceId().compareTo(to.getCompInstanceId());
   }
 
   @Override public boolean equals(Object o) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c74d1720/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
index 5b608e3..ae209b9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
@@ -484,8 +484,37 @@ public class TestYarnNativeServices extends ServiceTestUtils {
       }
     }
 
-    // Flex compa up to 4, which is more containers than the no of NMs
+    // Flex compa up to 5, which is more containers than the number of NMs
     Map<String, Long> compCounts = new HashMap<>();
+    compCounts.put("compa", 5L);
+    exampleApp.getComponent("compa").setNumberOfContainers(5L);
+    client.flexByRestService(exampleApp.getName(), compCounts);
+    try {
+      // 10 secs is enough for the container to be started. The downside of
+      // this test is that it has to wait that long. Setting a higher wait
+      // time would add to the total time taken by tests to run.
+      waitForServiceToBeStable(client, exampleApp, 10000);
+      Assert.fail("Service should not be in a stable state. It should throw "
+          + "a timeout exception.");
+    } catch (Exception e) {
+      // Check that service state is not STABLE and only 3 containers are
+      // running and the fourth one should not get allocated.
+      service = client.getStatus(exampleApp.getName());
+      component = service.getComponent("compa");
+      Assert.assertNotEquals("Service state should not be STABLE",
+          ServiceState.STABLE, service.getState());
+      Assert.assertEquals("Component state should be FLEXING",
+          ComponentState.FLEXING, component.getState());
+      Assert.assertEquals("3 containers are expected to be running", 3,
+          component.getContainers().size());
+    }
+
+    // Flex compa down to 4 now, which is still more containers than the
+    // number of NMs. This tests the use case where flex down does not kill
+    // any of the currently running containers, since the required number of
+    // containers is still higher than the number currently running. However,
+    // the component state will still be FLEXING and the service state will
+    // not be STABLE.
+    compCounts = new HashMap<>();
     compCounts.put("compa", 4L);
     exampleApp.getComponent("compa").setNumberOfContainers(4L);
     client.flexByRestService(exampleApp.getName(), compCounts);
@@ -509,6 +538,15 @@ public class TestYarnNativeServices extends ServiceTestUtils {
           component.getContainers().size());
     }
 
+    // Finally flex compa down to 3, which is exactly the number of containers
+    // currently running. This will bring the component and service states to
+    // STABLE.
+    compCounts = new HashMap<>();
+    compCounts.put("compa", 3L);
+    exampleApp.getComponent("compa").setNumberOfContainers(3L);
+    client.flexByRestService(exampleApp.getName(), compCounts);
+    waitForServiceToBeStable(client, exampleApp);
+
     LOG.info("Stop/destroy service {}", exampleApp);
     client.actionStop(exampleApp.getName(), true);
     client.actionDestroy(exampleApp.getName());
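
The compareTo() change above is what makes "largest instance ID first"
possible: the natural ordering of ComponentInstance is now purely the
component instance ID, independent of container start time. A toy sketch of
the resulting flex-down selection — component, getAllComponentInstances()
and desiredCount are illustrative assumptions, not names from the patch:

    import java.util.TreeSet;

    // With compareTo() keyed on CompInstanceId, the last element of a sorted
    // set is always the newest instance, e.g. compa-4 is destroyed before
    // compa-3 when flexing down.
    TreeSet<ComponentInstance> live =
        new TreeSet<>(component.getAllComponentInstances());
    while (live.size() > desiredCount) {
      ComponentInstance victim = live.pollLast(); // largest instance ID
      victim.destroy();
    }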




[17/50] [abbrv] hadoop git commit: HDDS-52. Fix TestSCMCli#testInfoContainer. Contributed by Mukul Kumar Singh.

Posted by xy...@apache.org.
HDDS-52. Fix TestSCMCli#testInfoContainer. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4c96b9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4c96b9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4c96b9b

Branch: refs/heads/HDDS-4
Commit: e4c96b9b549f5575860126485a64f95c183bede0
Parents: ca35c2e
Author: Anu Engineer <ae...@apache.org>
Authored: Sat May 12 10:07:32 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java      | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4c96b9b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
index 19bc423..732221a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
@@ -331,7 +331,7 @@ public class TestSCMCli {
 
     openStatus = data.isOpen() ? "OPEN" : "CLOSED";
     expected = String
-        .format(formatStrWithHash, container.getContainerID(), openStatus,
+        .format(formatStr, container.getContainerID(), openStatus,
             data.getDBPath(), data.getContainerPath(), "",
             datanodeDetails.getHostName(), datanodeDetails.getHostName());
     assertEquals(expected, out.toString());




[07/50] [abbrv] hadoop git commit: HDDS-18. Ozone Shell should use RestClient and RpcClient. Contributed by Lokesh Jain.

Posted by xy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java
index 181f6cc..5fc443e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.ozone.web.ozShell.bucket;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
-import org.apache.hadoop.ozone.web.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -37,7 +37,6 @@ import java.nio.file.Paths;
 public class DeleteBucketHandler extends Handler {
   private String volumeName;
   private String bucketName;
-  private String rootName;
 
   /**
    * Executes the Client Calls.
@@ -52,7 +51,7 @@ public class DeleteBucketHandler extends Handler {
   protected void execute(CommandLine cmd)
       throws IOException, OzoneException, URISyntaxException {
     if (!cmd.hasOption(Shell.DELETE_BUCKET)) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Incorrect call : deleteBucket is missing");
     }
 
@@ -60,7 +59,7 @@ public class DeleteBucketHandler extends Handler {
     URI ozoneURI = verifyURI(ozoneURIString);
     Path path = Paths.get(ozoneURI.getPath());
     if (path.getNameCount() < 2) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "volume and bucket name required in delete Bucket");
     }
 
@@ -72,16 +71,7 @@ public class DeleteBucketHandler extends Handler {
       System.out.printf("Bucket Name : %s%n", bucketName);
     }
 
-    if (cmd.hasOption(Shell.RUNAS)) {
-      rootName = "hdfs";
-    } else {
-      rootName = System.getProperty("user.name");
-    }
-
-    client.setEndPointURI(ozoneURI);
-    client.setUserAuth(rootName);
-
-    OzoneVolume vol = client.getVolume(volumeName);
+    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
     vol.deleteBucket(bucketName);
   }
 }
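This hunk sets the pattern for every handler in the commit: the REST-specific plumbing (`setEndPointURI`, `setUserAuth`, the RUNAS root-name handling) goes away, and all operations are reached through the unified client's `ObjectStore`. A minimal sketch of the new access path, assuming `client` is the `OzoneClient` held by the `Handler` base class:

    // Unified access path: ObjectStore -> volume -> bucket-level operation.
    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
    vol.deleteBucket(bucketName);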

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java
index 321c4c2..b3ca4e5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java
@@ -19,9 +19,10 @@ package org.apache.hadoop.ozone.web.ozShell.bucket;
 
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.ozone.web.client.OzoneBucket;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
-import org.apache.hadoop.ozone.web.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -39,7 +40,6 @@ import java.nio.file.Paths;
 public class InfoBucketHandler extends Handler {
   private String volumeName;
   private String bucketName;
-  private String rootName;
 
   /**
    * Executes the Client Calls.
@@ -54,7 +54,7 @@ public class InfoBucketHandler extends Handler {
   protected void execute(CommandLine cmd)
       throws IOException, OzoneException, URISyntaxException {
     if (!cmd.hasOption(Shell.INFO_BUCKET)) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Incorrect call : infoBucket is missing");
     }
 
@@ -63,7 +63,7 @@ public class InfoBucketHandler extends Handler {
     Path path = Paths.get(ozoneURI.getPath());
 
     if (path.getNameCount() < 2) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "volume and bucket name required in info Bucket");
     }
 
@@ -75,20 +75,11 @@ public class InfoBucketHandler extends Handler {
       System.out.printf("Bucket Name : %s%n", bucketName);
     }
 
-    if (cmd.hasOption(Shell.RUNAS)) {
-      rootName = "hdfs";
-    } else {
-      rootName = System.getProperty("user.name");
-    }
-
-    client.setEndPointURI(ozoneURI);
-    client.setUserAuth(rootName);
-
-    OzoneVolume vol = client.getVolume(volumeName);
+    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
     OzoneBucket bucket = vol.getBucket(bucketName);
 
     System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter(
-        bucket.getBucketInfo().toJsonString()));
+        JsonUtils.toJsonString(OzoneClientUtils.asBucketInfo(bucket))));
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java
index ea77cae..655022a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java
@@ -19,13 +19,14 @@
 package org.apache.hadoop.ozone.web.ozShell.bucket;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.ozone.web.client.OzoneBucket;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
-import org.apache.hadoop.ozone.web.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
-import org.apache.hadoop.ozone.web.response.BucketInfo;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 
@@ -34,15 +35,15 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
-import java.util.stream.Collectors;
 
 /**
  * Executes List Bucket.
  */
 public class ListBucketHandler extends Handler {
   private String volumeName;
-  private String rootName;
 
   /**
    * Executes the Client Calls.
@@ -57,7 +58,7 @@ public class ListBucketHandler extends Handler {
   protected void execute(CommandLine cmd)
       throws IOException, OzoneException, URISyntaxException {
     if (!cmd.hasOption(Shell.LIST_BUCKET)) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Incorrect call : listBucket is missing");
     }
 
@@ -65,30 +66,20 @@ public class ListBucketHandler extends Handler {
     URI ozoneURI = verifyURI(ozoneURIString);
     Path path = Paths.get(ozoneURI.getPath());
     if (path.getNameCount() < 1) {
-      throw new OzoneRestClientException("volume is required in listBucket");
+      throw new OzoneClientException("volume is required in listBucket");
     }
 
     volumeName = path.getName(0).toString();
 
-
     if (cmd.hasOption(Shell.VERBOSE)) {
       System.out.printf("Volume Name : %s%n", volumeName);
     }
 
-    if (cmd.hasOption(Shell.RUNAS)) {
-      rootName = "hdfs";
-    } else {
-      rootName = System.getProperty("user.name");
-    }
-
-
-    client.setEndPointURI(ozoneURI);
-    client.setUserAuth(rootName);
-
-    String length = null;
+    int maxBuckets = Integer.MAX_VALUE;
     if (cmd.hasOption(Shell.LIST_LENGTH)) {
-      length = cmd.getOptionValue(Shell.LIST_LENGTH);
+      String length = cmd.getOptionValue(Shell.LIST_LENGTH);
       OzoneUtils.verifyMaxKeyLength(length);
+      maxBuckets = Integer.parseInt(length);
     }
 
     String startBucket = null;
@@ -101,13 +92,21 @@ public class ListBucketHandler extends Handler {
       prefix = cmd.getOptionValue(Shell.PREFIX);
     }
 
-    OzoneVolume vol = client.getVolume(volumeName);
-    List<OzoneBucket> bucketList = vol.listBuckets(length, startBucket, prefix);
+    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
+    Iterator<OzoneBucket> bucketIterator = vol.listBuckets(prefix, startBucket);
+    List<BucketInfo> bucketList = new ArrayList<>();
+    while (maxBuckets > 0 && bucketIterator.hasNext()) {
+      BucketInfo bucketInfo = OzoneClientUtils.asBucketInfo(bucketIterator.next());
+      bucketList.add(bucketInfo);
+      maxBuckets -= 1;
+    }
 
-    List<BucketInfo> jsonData = bucketList.stream()
-        .map(OzoneBucket::getBucketInfo).collect(Collectors.toList());
+    if (cmd.hasOption(Shell.VERBOSE)) {
+      System.out.printf("Found : %d buckets for volume : %s ",
+          bucketList.size(), volumeName);
+    }
     System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(
-        JsonUtils.toJsonString(jsonData)));
+        JsonUtils.toJsonString(bucketList)));
   }
 }
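The list calls now return an `Iterator` instead of a pre-bounded `List`, so the `--length` cap is enforced client-side while draining the iterator. The same idiom recurs in `ListKeyHandler` and `ListVolumeHandler` below; a sketch, assuming `vol`, `prefix`, `startBucket` and `maxBuckets` as in the hunk:

    // Drain at most maxBuckets entries from the iterator.
    Iterator<OzoneBucket> buckets = vol.listBuckets(prefix, startBucket);
    List<BucketInfo> result = new ArrayList<>();
    while (maxBuckets > 0 && buckets.hasNext()) {
      result.add(OzoneClientUtils.asBucketInfo(buckets.next()));
      maxBuckets--;
    }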
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/UpdateBucketHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/UpdateBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/UpdateBucketHandler.java
index 781a00c..aff0e19 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/UpdateBucketHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/UpdateBucketHandler.java
@@ -18,9 +18,11 @@
 package org.apache.hadoop.ozone.web.ozShell.bucket;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.ozone.web.client.OzoneBucket;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
-import org.apache.hadoop.ozone.web.client.OzoneVolume;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -31,6 +33,9 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
 
 /**
  * Allows users to add and remove acls and from a bucket.
@@ -38,13 +43,12 @@ import java.nio.file.Paths;
 public class UpdateBucketHandler extends Handler {
   private String volumeName;
   private String bucketName;
-  private String rootName;
 
   @Override
   protected void execute(CommandLine cmd)
       throws IOException, OzoneException, URISyntaxException {
     if (!cmd.hasOption(Shell.UPDATE_BUCKET)) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Incorrect call : updateBucket is missing");
     }
 
@@ -53,7 +57,7 @@ public class UpdateBucketHandler extends Handler {
     Path path = Paths.get(ozoneURI.getPath());
 
     if (path.getNameCount() < 2) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "volume and bucket name required in update bucket");
     }
 
@@ -65,30 +69,27 @@ public class UpdateBucketHandler extends Handler {
       System.out.printf("Bucket Name : %s%n", bucketName);
     }
 
-    if (cmd.hasOption(Shell.RUNAS)) {
-      rootName = "hdfs";
-    } else {
-      rootName = System.getProperty("user.name");
-    }
-
-    client.setEndPointURI(ozoneURI);
-    client.setUserAuth(rootName);
-
-    OzoneVolume vol = client.getVolume(volumeName);
+    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
+    OzoneBucket bucket = vol.getBucket(bucketName);
     if (cmd.hasOption(Shell.ADD_ACLS)) {
       String aclString = cmd.getOptionValue(Shell.ADD_ACLS);
       String[] aclArray = aclString.split(",");
-      vol.addAcls(bucketName, aclArray);
+      List<OzoneAcl> aclList =
+          Arrays.stream(aclArray).map(acl -> OzoneAcl.parseAcl(acl))
+              .collect(Collectors.toList());
+      bucket.addAcls(aclList);
     }
 
     if (cmd.hasOption(Shell.REMOVE_ACLS)) {
       String aclString = cmd.getOptionValue(Shell.REMOVE_ACLS);
       String[] aclArray = aclString.split(",");
-      vol.removeAcls(bucketName, aclArray);
+      List<OzoneAcl> aclList =
+          Arrays.stream(aclArray).map(acl -> OzoneAcl.parseAcl(acl))
+              .collect(Collectors.toList());
+      bucket.removeAcls(aclList);
     }
 
-    OzoneBucket bucket = vol.getBucket(bucketName);
     System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter(
-        bucket.getBucketInfo().toJsonString()));
+        JsonUtils.toJsonString(OzoneClientUtils.asBucketInfo(bucket))));
   }
 }
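ACLs are now parsed into `OzoneAcl` objects before being applied, rather than passed through as raw strings. A sketch of the parse-and-apply step; the `type:name:rights` entry format (e.g. `user:bilbo:rw`) is an assumed example of what `OzoneAcl.parseAcl` accepts:

    // Split the comma-separated CLI argument and parse each entry.
    List<OzoneAcl> acls = Arrays.stream(aclString.split(","))
        .map(OzoneAcl::parseAcl)
        .collect(Collectors.toList());
    bucket.addAcls(acls);   // bucket.removeAcls(acls) for the remove path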

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java
index 1ad2588..fccabe7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java
@@ -19,9 +19,9 @@
 package org.apache.hadoop.ozone.web.ozShell.keys;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.ozone.web.client.OzoneBucket;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
-import org.apache.hadoop.ozone.web.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -36,7 +36,6 @@ import java.nio.file.Paths;
  * Executes Delete Key.
  */
 public class DeleteKeyHandler extends Handler {
-  private String userName;
   private String volumeName;
   private String bucketName;
   private String keyName;
@@ -53,23 +52,15 @@ public class DeleteKeyHandler extends Handler {
   protected void execute(CommandLine cmd)
       throws IOException, OzoneException, URISyntaxException {
     if (!cmd.hasOption(Shell.DELETE_KEY)) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Incorrect call : deleteKey is missing");
     }
 
-
-    if (cmd.hasOption(Shell.USER)) {
-      userName = cmd.getOptionValue(Shell.USER);
-    } else {
-      userName = System.getProperty("user.name");
-    }
-
-
     String ozoneURIString = cmd.getOptionValue(Shell.DELETE_KEY);
     URI ozoneURI = verifyURI(ozoneURIString);
     Path path = Paths.get(ozoneURI.getPath());
     if (path.getNameCount() < 3) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "volume/bucket/key name required in deleteKey");
     }
 
@@ -84,13 +75,8 @@ public class DeleteKeyHandler extends Handler {
       System.out.printf("Key Name : %s%n", keyName);
     }
 
-    client.setEndPointURI(ozoneURI);
-    client.setUserAuth(userName);
-
-
-    OzoneVolume vol = client.getVolume(volumeName);
+    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
     OzoneBucket bucket = vol.getBucket(bucketName);
     bucket.deleteKey(keyName);
-
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
index a56bbc0..34620b4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
@@ -20,24 +20,34 @@ package org.apache.hadoop.ozone.web.ozShell.keys;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_CHUNK_SIZE_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_CHUNK_SIZE_KEY;
+
 /**
  * Gets an existing key.
  */
 public class GetKeyHandler extends Handler {
-  private String userName;
   private String volumeName;
   private String bucketName;
   private String keyName;
@@ -56,26 +66,19 @@ public class GetKeyHandler extends Handler {
   protected void execute(CommandLine cmd)
       throws IOException, OzoneException, URISyntaxException {
     if (!cmd.hasOption(Shell.GET_KEY)) {
-      throw new OzoneRestClientException("Incorrect call : getKey is missing");
+      throw new OzoneClientException("Incorrect call : getKey is missing");
     }
 
     if (!cmd.hasOption(Shell.FILE)) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "get key needs a file path to download to");
     }
 
-    if (cmd.hasOption(Shell.USER)) {
-      userName = cmd.getOptionValue(Shell.USER);
-    } else {
-      userName = System.getProperty("user.name");
-    }
-
-
     String ozoneURIString = cmd.getOptionValue(Shell.GET_KEY);
     URI ozoneURI = verifyURI(ozoneURIString);
     Path path = Paths.get(ozoneURI.getPath());
     if (path.getNameCount() < 3) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "volume/bucket/key name required in putKey");
     }
 
@@ -97,19 +100,28 @@ public class GetKeyHandler extends Handler {
 
 
     if (dataFile.exists()) {
-      throw new OzoneRestClientException(fileName +
+      throw new OzoneClientException(fileName +
                                          "exists. Download will overwrite an " +
                                          "existing file. Aborting.");
     }
 
-    client.setEndPointURI(ozoneURI);
-    client.setUserAuth(userName);
-
-    client.getKey(volumeName, bucketName, keyName, dataFilePath);
+    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
+    OzoneBucket bucket = vol.getBucket(bucketName);
+    OzoneInputStream keyInputStream = bucket.readKey(keyName);
+    if (dataFilePath != null) {
+      FileOutputStream outputStream = new FileOutputStream(dataFile);
+      IOUtils.copyBytes(keyInputStream, outputStream, new OzoneConfiguration()
+          .getInt(OZONE_SCM_CHUNK_SIZE_KEY, OZONE_SCM_CHUNK_SIZE_DEFAULT));
+      outputStream.close();
+    } else {
+      throw new OzoneClientException(
+          "Can not access the file \"" + fileName + "\"");
+    }
     if(cmd.hasOption(Shell.VERBOSE)) {
       FileInputStream stream = new FileInputStream(dataFile);
       String hash = DigestUtils.md5Hex(stream);
       System.out.printf("Downloaded file hash : %s%n", hash);
+      stream.close();
     }
 
   }
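Key download is now a plain stream copy through the client API. A sketch using try-with-resources, which would also close both streams if the copy throws (the hunk above closes them explicitly on the success path); `volumeName`, `bucketName`, `keyName` and `dataFile` as in the handler:

    // Copy the key to a local file in chunk-sized buffers.
    OzoneBucket bucket =
        client.getObjectStore().getVolume(volumeName).getBucket(bucketName);
    int bufferSize = new OzoneConfiguration()
        .getInt(OZONE_SCM_CHUNK_SIZE_KEY, OZONE_SCM_CHUNK_SIZE_DEFAULT);
    try (OzoneInputStream in = bucket.readKey(keyName);
         FileOutputStream out = new FileOutputStream(dataFile)) {
      IOUtils.copyBytes(in, out, bufferSize);
    }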

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java
index 3e9b2da..3fcdda9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java
@@ -25,10 +25,11 @@ import java.nio.file.Path;
 import java.nio.file.Paths;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.ozone.web.client.OzoneBucket;
-import org.apache.hadoop.ozone.web.client.OzoneKey;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
-import org.apache.hadoop.ozone.web.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -38,7 +39,6 @@ import org.apache.hadoop.ozone.web.utils.JsonUtils;
  * Executes Info Object.
  */
 public class InfoKeyHandler extends Handler {
-  private String userName;
   private String volumeName;
   private String bucketName;
   private String keyName;
@@ -55,22 +55,14 @@ public class InfoKeyHandler extends Handler {
   protected void execute(CommandLine cmd)
       throws IOException, OzoneException, URISyntaxException {
     if (!cmd.hasOption(Shell.INFO_KEY)) {
-      throw new OzoneRestClientException("Incorrect call : infoKey is missing");
+      throw new OzoneClientException("Incorrect call : infoKey is missing");
     }
 
-
-    if (cmd.hasOption(Shell.USER)) {
-      userName = cmd.getOptionValue(Shell.USER);
-    } else {
-      userName = System.getProperty("user.name");
-    }
-
-
     String ozoneURIString = cmd.getOptionValue(Shell.INFO_KEY);
     URI ozoneURI = verifyURI(ozoneURIString);
     Path path = Paths.get(ozoneURI.getPath());
     if (path.getNameCount() < 3) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "volume/bucket/key name required in infoKey");
     }
 
@@ -85,14 +77,11 @@ public class InfoKeyHandler extends Handler {
       System.out.printf("Key Name : %s%n", keyName);
     }
 
-    client.setEndPointURI(ozoneURI);
-    client.setUserAuth(userName);
-
-    OzoneVolume vol = client.getVolume(volumeName);
+    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
     OzoneBucket bucket = vol.getBucket(bucketName);
-    OzoneKey key = bucket.getKeyInfo(keyName);
+    OzoneKey key = bucket.getKey(keyName);
 
     System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter(
-        key.getObjectInfo().toJsonString()));
+        JsonUtils.toJsonString(OzoneClientUtils.asKeyInfo(key))));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
index ea563ad..6e266fd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
@@ -19,12 +19,11 @@
 package org.apache.hadoop.ozone.web.ozShell.keys;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
-import org.apache.hadoop.ozone.web.client.OzoneKey;
+import org.apache.hadoop.ozone.client.*;
+import org.apache.hadoop.ozone.client.rest.response.KeyInfo;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
-import org.apache.hadoop.ozone.web.response.KeyInfo;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 
@@ -33,14 +32,14 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
-import java.util.stream.Collectors;
 
 /**
  * Executes List Keys.
  */
 public class ListKeyHandler extends Handler {
-  private String userName;
   private String volumeName;
   private String bucketName;
 
@@ -57,14 +56,15 @@ public class ListKeyHandler extends Handler {
       throws IOException, OzoneException, URISyntaxException {
 
     if (!cmd.hasOption(Shell.LIST_KEY)) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Incorrect call : listKey is missing");
     }
 
-    String length = null;
+    int maxKeys = Integer.MAX_VALUE;
     if (cmd.hasOption(Shell.LIST_LENGTH)) {
-      length = cmd.getOptionValue(Shell.LIST_LENGTH);
+      String length = cmd.getOptionValue(Shell.LIST_LENGTH);
       OzoneUtils.verifyMaxKeyLength(length);
+      maxKeys = Integer.parseInt(length);
     }
 
     String startKey = null;
@@ -81,7 +81,7 @@ public class ListKeyHandler extends Handler {
     URI ozoneURI = verifyURI(ozoneURIString);
     Path path = Paths.get(ozoneURI.getPath());
     if (path.getNameCount() < 2) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "volume/bucket is required in listKey");
     }
 
@@ -94,23 +94,23 @@ public class ListKeyHandler extends Handler {
       System.out.printf("bucket Name : %s%n", bucketName);
     }
 
-    if (cmd.hasOption(Shell.USER)) {
-      userName = cmd.getOptionValue(Shell.USER);
-    } else {
-      userName = System.getProperty("user.name");
-    }
-
-
-    client.setEndPointURI(ozoneURI);
-    client.setUserAuth(userName);
+    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
+    OzoneBucket bucket = vol.getBucket(bucketName);
+    Iterator<OzoneKey> keyIterator = bucket.listKeys(prefix, startKey);
+    List<KeyInfo> keyInfos = new ArrayList<>();
 
-    List<OzoneKey> keys = client.listKeys(volumeName, bucketName, length,
-        startKey, prefix);
+    while (maxKeys > 0 && keyIterator.hasNext()) {
+      KeyInfo key = OzoneClientUtils.asKeyInfo(keyIterator.next());
+      keyInfos.add(key);
+      maxKeys -= 1;
+    }
 
-    List<KeyInfo> jsonData = keys.stream()
-        .map(OzoneKey::getObjectInfo).collect(Collectors.toList());
-    System.out.printf(JsonUtils.toJsonStringWithDefaultPrettyPrinter(
-        JsonUtils.toJsonString(jsonData)));
+    if (cmd.hasOption(Shell.VERBOSE)) {
+      System.out.printf("Found : %d keys for bucket %s in volume : %s ",
+          keyInfos.size(), bucketName, volumeName);
+    }
+    System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(
+        JsonUtils.toJsonString(keyInfos)));
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
index 1f2c692..ed8cc88 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
@@ -20,7 +20,15 @@ package org.apache.hadoop.ozone.web.ozShell.keys;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -33,11 +41,15 @@ import java.net.URISyntaxException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
+
 /**
  * Puts a file into an ozone bucket.
  */
 public class PutKeyHandler extends Handler {
-  private String userName;
   private String volumeName;
   private String bucketName;
   private String keyName;
@@ -54,24 +66,18 @@ public class PutKeyHandler extends Handler {
   protected void execute(CommandLine cmd)
       throws IOException, OzoneException, URISyntaxException {
     if (!cmd.hasOption(Shell.PUT_KEY)) {
-      throw new OzoneRestClientException("Incorrect call : putKey is missing");
+      throw new OzoneClientException("Incorrect call : putKey is missing");
     }
 
     if (!cmd.hasOption(Shell.FILE)) {
-      throw new OzoneRestClientException("put key needs a file to put");
-    }
-
-    if (cmd.hasOption(Shell.USER)) {
-      userName = cmd.getOptionValue(Shell.USER);
-    } else {
-      userName = System.getProperty("user.name");
+      throw new OzoneClientException("put key needs a file to put");
     }
 
     String ozoneURIString = cmd.getOptionValue(Shell.PUT_KEY);
     URI ozoneURI = verifyURI(ozoneURIString);
     Path path = Paths.get(ozoneURI.getPath());
     if (path.getNameCount() < 3) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "volume/bucket/key name required in putKey");
     }
 
@@ -86,7 +92,6 @@ public class PutKeyHandler extends Handler {
       System.out.printf("Key Name : %s%n", keyName);
     }
 
-
     String fileName = cmd.getOptionValue(Shell.FILE);
     File dataFile = new File(fileName);
 
@@ -97,10 +102,22 @@ public class PutKeyHandler extends Handler {
       stream.close();
     }
 
-    client.setEndPointURI(ozoneURI);
-    client.setUserAuth(userName);
-
-    client.putKey(volumeName, bucketName, keyName, dataFile);
+    Configuration conf = new OzoneConfiguration();
+    ReplicationFactor replicationFactor = ReplicationFactor.valueOf(
+        conf.getInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue()));
+    ReplicationType replicationType = ReplicationType.valueOf(
+        conf.get(OZONE_REPLICATION_TYPE, ReplicationType.RATIS.toString()));
+
+    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
+    OzoneBucket bucket = vol.getBucket(bucketName);
+    OzoneOutputStream outputStream = bucket
+        .createKey(keyName, dataFile.length(), replicationType,
+            replicationFactor);
+    FileInputStream fileInputStream = new FileInputStream(dataFile);
+    IOUtils.copyBytes(fileInputStream, outputStream,
+        conf.getInt(OZONE_SCM_CHUNK_SIZE_KEY, OZONE_SCM_CHUNK_SIZE_DEFAULT));
+    outputStream.close();
+    fileInputStream.close();
   }
 
 }
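Upload mirrors download: replication type and factor come from configuration (defaulting to RATIS with three replicas, per the hunk), and the local file is streamed into an `OzoneOutputStream`. A sketch, again preferring try-with-resources; `bucket`, `keyName` and `dataFile` as in the handler:

    // Resolve replication settings, then stream the file into the new key.
    Configuration conf = new OzoneConfiguration();
    ReplicationFactor factor = ReplicationFactor.valueOf(
        conf.getInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue()));
    ReplicationType type = ReplicationType.valueOf(
        conf.get(OZONE_REPLICATION_TYPE, ReplicationType.RATIS.toString()));
    try (OzoneOutputStream out =
             bucket.createKey(keyName, dataFile.length(), type, factor);
         FileInputStream in = new FileInputStream(dataFile)) {
      IOUtils.copyBytes(in, out,
          conf.getInt(OZONE_SCM_CHUNK_SIZE_KEY, OZONE_SCM_CHUNK_SIZE_DEFAULT));
    }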

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
index c0b0bb9..74fdbb0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
@@ -19,8 +19,10 @@
 package org.apache.hadoop.ozone.web.ozShell.volume;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
-import org.apache.hadoop.ozone.web.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.VolumeArgs;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -52,14 +54,14 @@ public class CreateVolumeHandler extends Handler {
   protected void execute(CommandLine cmd)
       throws IOException, OzoneException, URISyntaxException {
     if (!cmd.hasOption(Shell.CREATE_VOLUME)) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Incorrect call : createVolume is missing");
     }
 
     String ozoneURIString = cmd.getOptionValue(Shell.CREATE_VOLUME);
     URI ozoneURI = verifyURI(ozoneURIString);
     if (ozoneURI.getPath().isEmpty()) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Volume name is required to create a volume");
     }
 
@@ -77,7 +79,7 @@ public class CreateVolumeHandler extends Handler {
     }
 
     if (!cmd.hasOption(Shell.USER)) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "User name is needed in createVolume call.");
     }
 
@@ -86,13 +88,19 @@ public class CreateVolumeHandler extends Handler {
     }
 
     userName = cmd.getOptionValue(Shell.USER);
-    client.setEndPointURI(ozoneURI);
-    client.setUserAuth(rootName);
 
-    OzoneVolume vol = client.createVolume(volumeName, userName, quota);
+    VolumeArgs.Builder volumeArgsBuilder = VolumeArgs.newBuilder()
+        .setAdmin(rootName)
+        .setOwner(userName);
+    if (quota != null) {
+      volumeArgsBuilder.setQuota(quota);
+    }
+    client.getObjectStore().createVolume(volumeName, volumeArgsBuilder.build());
+
     if (cmd.hasOption(Shell.VERBOSE)) {
-      System.out.printf("%s%n",
-          JsonUtils.toJsonStringWithDefaultPrettyPrinter(vol.getJsonString()));
+      OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
+      System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter(
+          JsonUtils.toJsonString(OzoneClientUtils.asVolumeInfo(vol))));
     }
   }
 }
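Volume creation moves to a builder, so optional attributes are set only when supplied. A sketch; the quota string value (e.g. "100GB") is an assumed example:

    // Admin and owner are required; quota is attached only if given.
    VolumeArgs.Builder args = VolumeArgs.newBuilder()
        .setAdmin(rootName)
        .setOwner(userName);
    if (quota != null) {
      args.setQuota(quota);
    }
    client.getObjectStore().createVolume(volumeName, args.build());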

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java
index bed7db3..d6facf6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.web.ozShell.volume;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -34,7 +34,6 @@ import java.net.URISyntaxException;
 public class DeleteVolumeHandler extends Handler {
 
   private String volumeName;
-  private String rootName;
 
   /**
    * Executes the delete volume call.
@@ -49,14 +48,14 @@ public class DeleteVolumeHandler extends Handler {
       throws IOException, OzoneException, URISyntaxException {
 
     if (!cmd.hasOption(Shell.DELETE_VOLUME)) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Incorrect call : deleteVolume call is missing");
     }
 
     String ozoneURIString = cmd.getOptionValue(Shell.DELETE_VOLUME);
     URI ozoneURI = verifyURI(ozoneURIString);
     if (ozoneURI.getPath().isEmpty()) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Volume name is required to delete a volume");
     }
 
@@ -67,15 +66,6 @@ public class DeleteVolumeHandler extends Handler {
       System.out.printf("Volume name : %s%n", volumeName);
     }
 
-    if (cmd.hasOption(Shell.RUNAS)) {
-      rootName = "hdfs";
-    } else {
-      rootName = System.getProperty("user.name");
-    }
-
-    client.setEndPointURI(ozoneURI);
-    client.setUserAuth(rootName);
-    client.deleteVolume(volumeName);
-
+    client.getObjectStore().deleteVolume(volumeName);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java
index 16de3d4..b5be2c6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java
@@ -19,8 +19,9 @@
 package org.apache.hadoop.ozone.web.ozShell.volume;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
-import org.apache.hadoop.ozone.web.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -35,9 +36,7 @@ import java.net.URISyntaxException;
  */
 public class InfoVolumeHandler extends Handler{
 
-  private String rootName;
   private String volumeName;
-  private String userName;
 
   /**
    * Executes volume Info.
@@ -53,43 +52,22 @@ public class InfoVolumeHandler extends Handler{
       throws IOException, OzoneException, URISyntaxException {
 
     if (!cmd.hasOption(Shell.INFO_VOLUME)) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Incorrect call : infoVolume is missing");
     }
 
     String ozoneURIString = cmd.getOptionValue(Shell.INFO_VOLUME);
     URI ozoneURI = verifyURI(ozoneURIString);
     if (ozoneURI.getPath().isEmpty()) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Volume name is required to get info of a volume");
     }
 
-    if (cmd.hasOption(Shell.RUNAS)) {
-      rootName = "hdfs";
-    }
-
     // we need to skip the slash in the URI path
     volumeName = ozoneURI.getPath().substring(1);
 
-    if (cmd.hasOption(Shell.USER)) {
-      userName = cmd.getOptionValue(Shell.USER);
-    } else {
-      userName = System.getProperty("user.name");
-    }
-
-    client.setEndPointURI(ozoneURI);
-
-    if (rootName != null) {
-      client.setUserAuth(rootName);
-    } else {
-      client.setUserAuth(userName);
-    }
-
-    client.setEndPointURI(ozoneURI);
-    client.setUserAuth(rootName);
-
-    OzoneVolume vol = client.getVolume(volumeName);
-    System.out.printf("%s%n",
-        JsonUtils.toJsonStringWithDefaultPrettyPrinter(vol.getJsonString()));
+    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
+    System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter(
+        JsonUtils.toJsonString(OzoneClientUtils.asVolumeInfo(vol))));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
index 189c891..3749df4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
@@ -19,26 +19,26 @@
 package org.apache.hadoop.ozone.web.ozShell.volume;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
-import org.apache.hadoop.ozone.web.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
-import org.apache.hadoop.ozone.web.response.VolumeInfo;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 
 import java.io.IOException;
-import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
-import java.util.stream.Collectors;
 
 /**
  * Executes List Volume call.
  */
 public class ListVolumeHandler extends Handler {
-  private String rootName;
   private String userName;
 
   /**
@@ -54,16 +54,16 @@ public class ListVolumeHandler extends Handler {
       throws IOException, OzoneException, URISyntaxException {
 
     if (!cmd.hasOption(Shell.LIST_VOLUME)) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Incorrect call : listVolume is missing");
     }
 
-    int maxKeys = 0;
+    int maxVolumes = Integer.MAX_VALUE;
     if (cmd.hasOption(Shell.LIST_LENGTH)) {
       String length = cmd.getOptionValue(Shell.LIST_LENGTH);
       OzoneUtils.verifyMaxKeyLength(length);
 
-      maxKeys = Integer.parseInt(length);
+      maxVolumes = Integer.parseInt(length);
     }
 
     String startVolume = null;
@@ -77,11 +77,7 @@ public class ListVolumeHandler extends Handler {
     }
 
     String ozoneURIString = cmd.getOptionValue(Shell.LIST_VOLUME);
-    URI ozoneURI = verifyURI(ozoneURIString);
-
-    if (cmd.hasOption(Shell.RUNAS)) {
-      rootName = "hdfs";
-    }
+    verifyURI(ozoneURIString);
 
     if (cmd.hasOption(Shell.USER)) {
       userName = cmd.getOptionValue(Shell.USER);
@@ -89,26 +85,28 @@ public class ListVolumeHandler extends Handler {
       userName = System.getProperty("user.name");
     }
 
-    client.setEndPointURI(ozoneURI);
-    if (rootName != null) {
-      client.setUserAuth(rootName);
+    Iterator<OzoneVolume> volumeIterator;
+    if(userName != null) {
+      volumeIterator = client.getObjectStore()
+          .listVolumesByUser(userName, prefix, startVolume);
     } else {
-      client.setUserAuth(userName);
+      volumeIterator = client.getObjectStore().listVolumes(prefix);
+    }
+
+    List<VolumeInfo> volumeInfos = new ArrayList<>();
+
+    while (maxVolumes > 0 && volumeIterator.hasNext()) {
+      VolumeInfo volume = OzoneClientUtils.asVolumeInfo(volumeIterator.next());
+      volumeInfos.add(volume);
+      maxVolumes -= 1;
     }
 
-    List<OzoneVolume> volumes = client.listVolumes(userName, prefix, maxKeys,
-        startVolume);
-    if (volumes != null) {
-      if (cmd.hasOption(Shell.VERBOSE)) {
-        System.out.printf("Found : %d volumes for user : %s %n", volumes.size(),
-            userName);
-      }
-
-      List<VolumeInfo> jsonData = volumes.stream()
-          .map(OzoneVolume::getVolumeInfo).collect(Collectors.toList());
-      System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(
-          JsonUtils.toJsonString(jsonData)));
+    if (cmd.hasOption(Shell.VERBOSE)) {
+      System.out.printf("Found : %d volumes for user : %s ", volumeInfos.size(),
+          userName);
     }
+    System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(
+        JsonUtils.toJsonString(volumeInfos)));
   }
 }
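Volume listing picks between a per-user view and a global prefix view, though note that `userName` is always assigned above (it falls back to the `user.name` system property), so the global branch appears unreachable as written. A sketch of the two entry points:

    // Per-user listing when a user is given; otherwise list by prefix only.
    Iterator<OzoneVolume> volumes = (userName != null)
        ? client.getObjectStore().listVolumesByUser(userName, prefix, startVolume)
        : client.getObjectStore().listVolumes(prefix);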
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java
index 164fe34..1e3fbb5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.ozone.web.ozShell.volume;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
+import org.apache.hadoop.hdds.client.OzoneQuota;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -32,7 +34,6 @@ import java.net.URISyntaxException;
  * Executes update volume calls.
  */
 public class UpdateVolumeHandler extends Handler {
-  private String rootName;
   private String ownerName;
   private String volumeName;
   private String quota;
@@ -49,26 +50,20 @@ public class UpdateVolumeHandler extends Handler {
   protected void execute(CommandLine cmd)
       throws IOException, OzoneException, URISyntaxException {
     if (!cmd.hasOption(Shell.UPDATE_VOLUME)) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Incorrect call : updateVolume is missing");
     }
 
     String ozoneURIString = cmd.getOptionValue(Shell.UPDATE_VOLUME);
     URI ozoneURI = verifyURI(ozoneURIString);
     if (ozoneURI.getPath().isEmpty()) {
-      throw new OzoneRestClientException(
+      throw new OzoneClientException(
           "Volume name is required to update a volume");
     }
 
     // we need to skip the slash in the URI path
     volumeName = ozoneURI.getPath().substring(1);
 
-    if (cmd.hasOption(Shell.RUNAS)) {
-      rootName = "hdfs";
-    } else {
-      rootName = System.getProperty("user.name");
-    }
-
     if (cmd.hasOption(Shell.QUOTA)) {
       quota = cmd.getOptionValue(Shell.QUOTA);
     }
@@ -77,16 +72,13 @@ public class UpdateVolumeHandler extends Handler {
       ownerName = cmd.getOptionValue(Shell.USER);
     }
 
-    client.setEndPointURI(ozoneURI);
-    client.setUserAuth(rootName);
-
+    OzoneVolume volume = client.getObjectStore().getVolume(volumeName);
     if (quota != null && !quota.isEmpty()) {
-      client.setVolumeQuota(volumeName, quota);
+      volume.setQuota(OzoneQuota.parseQuota(quota));
     }
 
     if (ownerName != null && !ownerName.isEmpty()) {
-      client.setVolumeOwner(volumeName, ownerName);
+      volume.setOwner(ownerName);
     }
-
   }
 }
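Quota and owner updates now act on the `OzoneVolume` handle directly, with the raw quota string parsed into an `OzoneQuota` first. A sketch; the quota value (e.g. "10GB") is an assumed example of what `OzoneQuota.parseQuota` accepts:

    // Apply only the attributes the caller actually supplied.
    OzoneVolume volume = client.getObjectStore().getVolume(volumeName);
    if (quota != null && !quota.isEmpty()) {
      volume.setQuota(OzoneQuota.parseQuota(quota));
    }
    if (ownerName != null && !ownerName.isEmpty()) {
      volume.setOwner(ownerName);
    }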

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/Constants.java b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
index 992d43a..832a0cb 100644
--- a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
+++ b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
@@ -23,12 +23,8 @@ package org.apache.hadoop.fs.ozone;
  */
 public final class Constants {
 
-  public static final String OZONE_URI_SCHEME = "o3";
-
   public static final String OZONE_DEFAULT_USER = "hdfs";
 
-  public static final String OZONE_HTTP_SCHEME = "http://";
-
   public static final String OZONE_USER_DIR = "/user";
 
   /** Local buffer directory. */
@@ -37,8 +33,6 @@ public final class Constants {
   /** Temporary directory. */
   public static final String BUFFER_TMP_KEY = "hadoop.tmp.dir";
 
-  public static final String OZONE_URI_DELIMITER = "/";
-
   /** Page size for Ozone listing operation. */
   public static final int LISTING_PAGE_SIZE = 1024;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
index 46dd645..4163c13 100644
--- a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
+++ b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DelegateToFileSystem;
+import org.apache.hadoop.ozone.OzoneConsts;
 
 import java.io.IOException;
 import java.net.URI;
@@ -38,6 +39,6 @@ public class OzFs extends DelegateToFileSystem {
   public OzFs(URI theUri, Configuration conf)
       throws IOException, URISyntaxException {
     super(theUri, new OzoneFileSystem(), conf,
-        Constants.OZONE_URI_SCHEME, false);
+        OzoneConsts.OZONE_URI_SCHEME, false);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index ef0d3ab..0ff1d50 100644
--- a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -64,9 +64,9 @@ import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
-import static org.apache.hadoop.fs.ozone.Constants.OZONE_URI_SCHEME;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_USER_DIR;
-import static org.apache.hadoop.fs.ozone.Constants.OZONE_URI_DELIMITER;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE;
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
index a7a53dc..4d8c9d6 100644
--- a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
+++ b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.handlers.BucketArgs;
 import org.apache.hadoop.ozone.web.handlers.UserArgs;
 import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
@@ -94,7 +95,7 @@ public class TestOzoneFSInputStream {
 
     // Set the fs.defaultFS and start the filesystem
     String uri = String.format("%s://%s.%s/",
-        Constants.OZONE_URI_SCHEME, bucketName, volumeName);
+        OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
     conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri);
     fs =  FileSystem.get(conf);
     int fileLen = 100 * 1024 * 1024;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
index 9f94e37..5a7cb4f 100644
--- a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
+++ b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
@@ -24,6 +24,7 @@ import java.util.Arrays;
 import java.util.Collection;
 
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -118,7 +119,7 @@ public class TestOzoneFileInterfaces {
     storageHandler.createBucket(bucketArgs);
 
     rootPath = String
-        .format("%s://%s.%s/", Constants.OZONE_URI_SCHEME, bucketName,
+        .format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucketName,
             volumeName);
     if (setDefaultFs) {
       // Set the fs.defaultFS and start the filesystem
@@ -145,7 +146,7 @@ public class TestOzoneFileInterfaces {
           "The initialized file system is not OzoneFileSystem but " +
               fs.getClass(),
           fs instanceof OzoneFileSystem);
-      assertEquals(Constants.OZONE_URI_SCHEME, fs.getUri().getScheme());
+      assertEquals(OzoneConsts.OZONE_URI_SCHEME, fs.getUri().getScheme());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db209ba/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
index 3848bc8..176b614 100644
--- a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
+++ b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
@@ -24,9 +24,9 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.apache.hadoop.fs.ozone.Constants;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.handlers.BucketArgs;
 import org.apache.hadoop.ozone.web.handlers.UserArgs;
@@ -56,7 +56,7 @@ class OzoneContract extends AbstractFSContract {
 
   @Override
   public String getScheme() {
-    return Constants.OZONE_URI_SCHEME;
+    return OzoneConsts.OZONE_URI_SCHEME;
   }
 
   @Override
@@ -107,7 +107,7 @@ class OzoneContract extends AbstractFSContract {
     }
 
     String uri = String.format("%s://%s.%s/",
-        Constants.OZONE_URI_SCHEME, bucketName, volumeName);
+        OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
     getConf().set("fs.defaultFS", uri);
     copyClusterConfigs(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY);
     copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
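
For reference, a minimal standalone sketch of the URI-construction pattern
these tests now share after the constant moved to OzoneConsts. The bucket
and volume names are placeholders, and the printed scheme assumes
OzoneConsts.OZONE_URI_SCHEME resolves to "o3" as elsewhere in this patch:

import org.apache.hadoop.ozone.OzoneConsts;

public final class OzoneUriExample {
  private OzoneUriExample() {
  }

  public static void main(String[] args) {
    // Same shape as the tests above: scheme://bucket.volume/
    String bucketName = "bucket1";  // placeholder
    String volumeName = "volume1";  // placeholder
    String uri = String.format("%s://%s.%s/",
        OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
    System.out.println(uri);  // e.g. o3://bucket1.volume1/
  }
}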




[25/50] [abbrv] hadoop git commit: HDDS-53. Fix TestKey#testPutAndGetKeyWithDnRestart. Contributed by Mukul Kumar Singh.

Posted by xy...@apache.org.
HDDS-53. Fix TestKey#testPutAndGetKeyWithDnRestart.
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d7decbc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d7decbc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d7decbc

Branch: refs/heads/HDDS-4
Commit: 7d7decbc149912a1a02adb0a535ccf06cc999d6b
Parents: e4c96b9
Author: Anu Engineer <ae...@apache.org>
Authored: Sat May 12 10:13:13 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java   | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
----------------------------------------------------------------------
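
Before the file diff, a minimal sketch of the technique the fix applies:
read back the ports the datanode actually bound, pin them in the
Configuration, and switch off the random-port flags so the restarted
service reuses the same ports. The pinPorts helper and its parameters are
illustrative; the patch inlines this logic in the datanode restart path.

import org.apache.hadoop.conf.Configuration;

import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_PORT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;

final class PortPinning {
  private PortPinning() {
  }

  // Pin the ports a datanode is currently bound to so that a subsequent
  // restart binds the same ports instead of picking random free ones.
  static void pinPorts(Configuration conf, int containerPort, int ratisPort) {
    conf.setInt(DFS_CONTAINER_IPC_PORT, containerPort);
    conf.setBoolean(DFS_CONTAINER_IPC_RANDOM_PORT, false);
    conf.setInt(DFS_CONTAINER_RATIS_IPC_PORT, ratisPort);
    conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, false);
  }
}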


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d7decbc/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 6663933..b837100 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -63,6 +63,14 @@ import java.util.concurrent.TimeoutException;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
     .HEALTHY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .DFS_CONTAINER_IPC_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .DFS_CONTAINER_IPC_RANDOM_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .DFS_CONTAINER_RATIS_IPC_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;
 
 /**
  * MiniOzoneCluster creates a complete in-process Ozone cluster suitable for
@@ -212,6 +220,14 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
     HddsDatanodeService datanodeService = hddsDatanodes.get(i);
     datanodeService.stop();
     datanodeService.join();
+    // ensure same ports are used across restarts.
+    Configuration conf = datanodeService.getConf();
+    int currentPort = datanodeService.getDatanodeDetails().getContainerPort();
+    conf.setInt(DFS_CONTAINER_IPC_PORT, currentPort);
+    conf.setBoolean(DFS_CONTAINER_IPC_RANDOM_PORT, false);
+    int ratisPort = datanodeService.getDatanodeDetails().getRatisPort();
+    conf.setInt(DFS_CONTAINER_RATIS_IPC_PORT, ratisPort);
+    conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, false);
     datanodeService.start(null);
   }
 




[46/50] [abbrv] hadoop git commit: HDFS-13539. DFSStripedInputStream NPE when reportCheckSumFailure.

Posted by xy...@apache.org.
HDFS-13539. DFSStripedInputStream NPE when reportCheckSumFailure.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/307710d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/307710d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/307710d1

Branch: refs/heads/HDDS-4
Commit: 307710d1b5d7a34cdf34c3761b68e170ffced995
Parents: f2358e7
Author: Xiao Chen <xi...@apache.org>
Authored: Mon May 14 09:28:09 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:10 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 13 +++++++++-
 .../hadoop/hdfs/DFSStripedInputStream.java      |  8 ++++---
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 25 ++++++++++++++++++++
 3 files changed, 42 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
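
A minimal standalone sketch of the guard this change introduces: report
zero block locations (and log the stream position) instead of
dereferencing a possibly-null current block. LocatedBlockView is a
placeholder standing in for the real LocatedBlock:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class NullSafeLocations {
  private static final Logger LOG =
      LoggerFactory.getLogger(NullSafeLocations.class);

  private NullSafeLocations() {
  }

  interface LocatedBlockView {
    Object[] getLocations();  // stand-in for LocatedBlock#getLocations
  }

  // Mirrors getCurrentBlockLocationsLength(): 0 when there is no current
  // block, otherwise the number of replica locations.
  static int locationsLength(LocatedBlockView block, long pos) {
    if (block == null) {
      LOG.info("Found null current block. pos={}", pos);
      return 0;
    }
    return block.getLocations().length;
  }
}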


http://git-wip-us.apache.org/repos/asf/hadoop/blob/307710d1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index d3d6669..b38e629 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -790,13 +790,24 @@ public class DFSInputStream extends FSInputStream
           // Check if need to report block replicas corruption either read
           // was successful or ChecksumException occurred.
           reportCheckSumFailure(corruptedBlocks,
-              currentLocatedBlock.getLocations().length, false);
+              getCurrentBlockLocationsLength(), false);
         }
       }
     }
     return -1;
   }
 
+  protected int getCurrentBlockLocationsLength() {
+    int len = 0;
+    if (currentLocatedBlock == null) {
+      DFSClient.LOG.info("Found null currentLocatedBlock. pos={}, "
+          + "blockEnd={}, fileLength={}", pos, blockEnd, getFileLength());
+    } else {
+      len = currentLocatedBlock.getLocations().length;
+    }
+    return len;
+  }
+
   /**
    * Read the entire buffer.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/307710d1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 339a02c..f3b16e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.hdfs.protocol.BlockType;
@@ -160,7 +161,8 @@ public class DFSStripedInputStream extends DFSInputStream {
    * When seeking into a new block group, create blockReader for each internal
    * block in the group.
    */
-  private synchronized void blockSeekTo(long target) throws IOException {
+  @VisibleForTesting
+  synchronized void blockSeekTo(long target) throws IOException {
     if (target >= getFileLength()) {
       throw new IOException("Attempted to read past end of file");
     }
@@ -400,8 +402,8 @@ public class DFSStripedInputStream extends DFSInputStream {
       } finally {
         // Check if need to report block replicas corruption either read
         // was successful or ChecksumException occurred.
-        reportCheckSumFailure(corruptedBlocks,
-            currentLocatedBlock.getLocations().length, true);
+        reportCheckSumFailure(corruptedBlocks, getCurrentBlockLocationsLength(),
+            true);
       }
     }
     return -1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/307710d1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index de276a9..cdebee0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
 import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -51,7 +52,12 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.spy;
 
 public class TestDFSStripedInputStream {
 
@@ -504,4 +510,23 @@ public class TestDFSStripedInputStream {
       in.close();
     }
   }
+
+  @Test
+  public void testReadFailToGetCurrentBlock() throws Exception {
+    DFSTestUtil.writeFile(cluster.getFileSystem(), filePath, "test");
+    try (DFSStripedInputStream in = (DFSStripedInputStream) fs.getClient()
+        .open(filePath.toString())) {
+      final DFSStripedInputStream spy = spy(in);
+      final String msg = "Injected exception for testReadNPE";
+      doThrow(new IOException(msg)).when(spy).blockSeekTo(anyLong());
+      assertNull(in.getCurrentBlock());
+      try {
+        spy.read();
+        fail("read should have failed");
+      } catch (IOException expected) {
+        LOG.info("Exception caught", expected);
+        GenericTestUtils.assertExceptionContains(msg, expected);
+      }
+    }
+  }
 }




[12/50] [abbrv] hadoop git commit: YARN-8202. DefaultAMSProcessor should properly check units of requested custom resource types against minimum/maximum allocation (snemeth via rkanter)

Posted by xy...@apache.org.
YARN-8202. DefaultAMSProcessor should properly check units of requested custom resource types against minimum/maximum allocation (snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8fa7cb6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8fa7cb6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8fa7cb6

Branch: refs/heads/HDDS-4
Commit: c8fa7cb6d0c12c4e65e53ea60167b856511b8294
Parents: 5c1c344
Author: Robert Kanter <rk...@apache.org>
Authored: Thu May 10 09:31:59 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:08 2018 -0700

----------------------------------------------------------------------
 .../v2/app/rm/ContainerRequestCreator.java      |  57 ++
 .../v2/app/rm/TestRMContainerAllocator.java     | 534 ++++++++++---------
 .../hadoop/yarn/util/UnitsConversionUtil.java   |  44 +-
 .../resourcetypes/ResourceTypesTestHelper.java  |  93 ++++
 .../hadoop/yarn/server/utils/BuilderUtils.java  |   8 +-
 .../scheduler/SchedulerUtils.java               |  95 +++-
 .../TestApplicationMasterService.java           | 185 +++++--
 .../scheduler/TestSchedulerUtils.java           | 278 +++++++++-
 8 files changed, 961 insertions(+), 333 deletions(-)
----------------------------------------------------------------------
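
The gist of the fix, as an illustrative sketch rather than the patch
itself: convert a requested custom resource value into the units of the
configured limit before comparing, via the UnitsConversionUtil helper this
change extends. The unit strings and values are made-up examples, and
exceedsMaximum is a hypothetical helper, not an API from the patch:

import org.apache.hadoop.yarn.util.UnitsConversionUtil;

final class UnitsCheckExample {
  private UnitsCheckExample() {
  }

  // Normalize the requested value into the limit's units, then compare.
  static boolean exceedsMaximum(String requestedUnits, long requestedValue,
      String maxUnits, long maxValue) {
    long requestedInMaxUnits =
        UnitsConversionUtil.convert(requestedUnits, maxUnits, requestedValue);
    return requestedInMaxUnits > maxValue;
  }

  public static void main(String[] args) {
    // 2 Gi of a custom resource against a 1024 Mi maximum -> true.
    System.out.println(exceedsMaximum("Gi", 2L, "Mi", 1024L));
  }
}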


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8fa7cb6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestCreator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestCreator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestCreator.java
new file mode 100644
index 0000000..39a9ddc
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestCreator.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.app.rm;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+final class ContainerRequestCreator {
+
+  private ContainerRequestCreator() {}
+
+  static ContainerRequestEvent createRequest(JobId jobId, int taskAttemptId,
+          Resource resource, String[] hosts) {
+    return createRequest(jobId, taskAttemptId, resource, hosts,
+            false, false);
+  }
+
+  static ContainerRequestEvent createRequest(JobId jobId, int taskAttemptId,
+          Resource resource, String[] hosts, boolean earlierFailedAttempt,
+          boolean reduce) {
+    final TaskId taskId;
+    if (reduce) {
+      taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
+    } else {
+      taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
+    }
+    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
+            taskAttemptId);
+
+    if (earlierFailedAttempt) {
+      return ContainerRequestEvent
+              .createContainerRequestEventForFailedContainer(attemptId,
+                      resource);
+    }
+    return new ContainerRequestEvent(attemptId, resource, hosts,
+            new String[]{NetworkTopology.DEFAULT_RACK});
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8fa7cb6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 7875917..427e6ea 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.mapreduce.v2.app.rm;
 
+import static org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestCreator.createRequest;
 import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyFloat;
@@ -96,7 +97,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.CollectorInfo;
@@ -203,7 +203,7 @@ public class TestRMContainerAllocator {
     JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
     Job mockJob = mock(Job.class);
     when(mockJob.getReport()).thenReturn(
-        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, 
+        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
             0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
     MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
         appAttemptId, mockJob);
@@ -215,13 +215,13 @@ public class TestRMContainerAllocator {
     rm.drainEvents();
 
     // create the container request
-    ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
-        new String[] { "h1" });
+    ContainerRequestEvent event1 = ContainerRequestCreator.createRequest(jobId,
+        1, Resource.newInstance(1024, 1), new String[] {"h1"});
     allocator.sendRequest(event1);
 
     // send 1 more request with different resource req
-    ContainerRequestEvent event2 = createReq(jobId, 2, 1024,
-        new String[] { "h2" });
+    ContainerRequestEvent event2 = ContainerRequestCreator.createRequest(jobId,
+        2, Resource.newInstance(1024, 1), new String[] {"h2"});
     allocator.sendRequest(event2);
 
     // this tells the scheduler about the requests
@@ -232,8 +232,8 @@ public class TestRMContainerAllocator {
     Assert.assertEquals(4, rm.getMyFifoScheduler().lastAsk.size());
 
     // send another request with different resource and priority
-    ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
-        new String[] { "h3" });
+    ContainerRequestEvent event3 = ContainerRequestCreator.createRequest(jobId,
+        3, Resource.newInstance(1024, 1), new String[] {"h3"});
     allocator.sendRequest(event3);
 
     // this tells the scheduler about the requests
@@ -242,7 +242,7 @@ public class TestRMContainerAllocator {
     rm.drainEvents();
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
     Assert.assertEquals(3, rm.getMyFifoScheduler().lastAsk.size());
-    
+
     // update resources in scheduler
     nodeManager1.nodeHeartbeat(true); // Node heartbeat
     nodeManager2.nodeHeartbeat(true); // Node heartbeat
@@ -252,21 +252,21 @@ public class TestRMContainerAllocator {
     assigned = allocator.schedule();
     rm.drainEvents();
     Assert.assertEquals(0, rm.getMyFifoScheduler().lastAsk.size());
-    checkAssignments(new ContainerRequestEvent[] { event1, event2, event3 },
+    checkAssignments(new ContainerRequestEvent[] {event1, event2, event3},
         assigned, false);
-    
+
     // check that the assigned container requests are cancelled
     allocator.schedule();
     rm.drainEvents();
     Assert.assertEquals(5, rm.getMyFifoScheduler().lastAsk.size());
   }
-  
-  @Test 
+
+  @Test
   public void testMapNodeLocality() throws Exception {
-    // test checks that ordering of allocated containers list from the RM does 
-    // not affect the map->container assignment done by the AM. If there is a 
-    // node local container available for a map then it should be assigned to 
-    // that container and not a rack-local container that happened to be seen 
+    // test checks that ordering of allocated containers list from the RM does
+    // not affect the map->container assignment done by the AM. If there is a
+    // node local container available for a map then it should be assigned to
+    // that container and not a rack-local container that happened to be seen
     // earlier in the allocated containers list from the RM.
     // Regression test for MAPREDUCE-4893
     LOG.info("Running testMapNodeLocality");
@@ -291,26 +291,29 @@ public class TestRMContainerAllocator {
     JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
     Job mockJob = mock(Job.class);
     when(mockJob.getReport()).thenReturn(
-        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, 
+        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
             0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
     MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
         appAttemptId, mockJob);
 
     // add resources to scheduler
-    MockNM nodeManager1 = rm.registerNode("h1:1234", 3072); // can assign 2 maps 
+    MockNM nodeManager1 = rm.registerNode("h1:1234", 3072); // can assign 2 maps
     rm.registerNode("h2:1234", 10240); // wont heartbeat on node local node
     MockNM nodeManager3 = rm.registerNode("h3:1234", 1536); // assign 1 map
     rm.drainEvents();
 
     // create the container requests for maps
-    ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
-        new String[] { "h1" });
+    ContainerRequestEvent event1 = ContainerRequestCreator.createRequest(
+            jobId, 1, Resource.newInstance(1024, 1),
+            new String[]{"h1"});
     allocator.sendRequest(event1);
-    ContainerRequestEvent event2 = createReq(jobId, 2, 1024,
-        new String[] { "h1" });
+    ContainerRequestEvent event2 = ContainerRequestCreator.createRequest(
+            jobId, 2, Resource.newInstance(1024, 1),
+            new String[]{"h1"});
     allocator.sendRequest(event2);
-    ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
-        new String[] { "h2" });
+    ContainerRequestEvent event3 = ContainerRequestCreator.createRequest(
+            jobId, 3, Resource.newInstance(1024, 1),
+            new String[]{"h2"});
     allocator.sendRequest(event3);
 
     // this tells the scheduler about the requests
@@ -323,14 +326,14 @@ public class TestRMContainerAllocator {
     // Node heartbeat from rack-local first. This makes node h3 the first in the
     // list of allocated containers but it should not be assigned to task1.
     nodeManager3.nodeHeartbeat(true);
-    // Node heartbeat from node-local next. This allocates 2 node local 
+    // Node heartbeat from node-local next. This allocates 2 node local
     // containers for task1 and task2. These should be matched with those tasks.
     nodeManager1.nodeHeartbeat(true);
     rm.drainEvents();
 
     assigned = allocator.schedule();
     rm.drainEvents();
-    checkAssignments(new ContainerRequestEvent[] { event1, event2, event3 },
+    checkAssignments(new ContainerRequestEvent[] {event1, event2, event3},
         assigned, false);
     // remove the rack-local assignment that should have happened for task3
     for(TaskAttemptContainerAssignedEvent event : assigned) {
@@ -340,7 +343,7 @@ public class TestRMContainerAllocator {
         break;
       }
     }
-    checkAssignments(new ContainerRequestEvent[] { event1, event2},
+    checkAssignments(new ContainerRequestEvent[] {event1, event2},
         assigned, true);
   }
 
@@ -381,13 +384,15 @@ public class TestRMContainerAllocator {
     rm.drainEvents();
 
     // create the container request
-    ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
-        new String[] { "h1" });
+    ContainerRequestEvent event1 = ContainerRequestCreator.createRequest(
+            jobId, 1, Resource.newInstance(1024, 1),
+        new String[] {"h1"});
     allocator.sendRequest(event1);
 
     // send 1 more request with different resource req
-    ContainerRequestEvent event2 = createReq(jobId, 2, 2048,
-        new String[] { "h2" });
+    ContainerRequestEvent event2 = ContainerRequestCreator.createRequest(
+            jobId, 2, Resource.newInstance(1024, 1),
+        new String[] {"h2"});
     allocator.sendRequest(event2);
 
     // this tells the scheduler about the requests
@@ -404,7 +409,7 @@ public class TestRMContainerAllocator {
 
     assigned = allocator.schedule();
     rm.drainEvents();
-    checkAssignments(new ContainerRequestEvent[] { event1, event2 },
+    checkAssignments(new ContainerRequestEvent[] {event1, event2},
         assigned, false);
   }
 
@@ -439,15 +444,19 @@ public class TestRMContainerAllocator {
     rm.drainEvents();
 
     // create the container request
-    final String[] locations = new String[] { host };
-    allocator.sendRequest(createReq(jobId, 0, 1024, locations, false, true));
+    final String[] locations = new String[] {host};
+    allocator.sendRequest(createRequest(jobId, 0,
+            Resource.newInstance(1024, 1),
+            locations, false, true));
     for (int i = 0; i < 1;) {
       rm.drainEvents();
       i += allocator.schedule().size();
       nm.nodeHeartbeat(true);
     }
 
-    allocator.sendRequest(createReq(jobId, 0, 1024, locations, true, false));
+    allocator.sendRequest(createRequest(jobId, 0,
+            Resource.newInstance(1024, 1),
+            locations, true, false));
     while (allocator.getTaskAttemptKillEvents().size() == 0) {
       rm.drainEvents();
       allocator.schedule().size();
@@ -494,9 +503,10 @@ public class TestRMContainerAllocator {
     RMContainerAllocator.ScheduledRequests scheduledRequests =
         allocator.getScheduledRequests();
     ContainerRequestEvent event1 =
-        createReq(jobId, 1, 2048, new String[] { "h1" }, false, false);
+        createRequest(jobId, 1, Resource.newInstance(2048, 1),
+            new String[] {"h1"}, false, false);
     scheduledRequests.maps.put(mock(TaskAttemptId.class),
-        new RMContainerRequestor.ContainerRequest(event1, null,null));
+        new RMContainerRequestor.ContainerRequest(event1, null, null));
     assignedRequests.reduces.put(mock(TaskAttemptId.class),
         mock(Container.class));
 
@@ -547,9 +557,12 @@ public class TestRMContainerAllocator {
     RMContainerAllocator.ScheduledRequests scheduledRequests =
         allocator.getScheduledRequests();
     ContainerRequestEvent event1 =
-        createReq(jobId, 1, 2048, new String[] { "h1" }, false, false);
+        createRequest(jobId, 1,
+                Resource.newInstance(2048, 1),
+                new String[] {"h1"}, false, false);
     scheduledRequests.maps.put(mock(TaskAttemptId.class),
-        new RMContainerRequestor.ContainerRequest(event1, null, clock.getTime()));
+        new RMContainerRequestor.ContainerRequest(event1, null,
+                clock.getTime()));
     assignedRequests.reduces.put(mock(TaskAttemptId.class),
         mock(Container.class));
 
@@ -561,7 +574,7 @@ public class TestRMContainerAllocator {
     clock.setTime(clock.getTime() + (preemptThreshold) * 1000);
     allocator.preemptReducesIfNeeded();
     Assert.assertEquals("The reducer is not preeempted", 1,
-        assignedRequests.preemptionWaitingReduces.size());
+            assignedRequests.preemptionWaitingReduces.size());
   }
 
   @Test(timeout = 30000)
@@ -608,9 +621,12 @@ public class TestRMContainerAllocator {
     RMContainerAllocator.ScheduledRequests scheduledRequests =
         allocator.getScheduledRequests();
     ContainerRequestEvent event1 =
-        createReq(jobId, 1, 2048, new String[] { "h1" }, false, false);
+        createRequest(jobId, 1,
+                Resource.newInstance(2048, 1),
+                new String[] {"h1"}, false, false);
     scheduledRequests.maps.put(mock(TaskAttemptId.class),
-        new RMContainerRequestor.ContainerRequest(event1, null, clock.getTime()));
+        new RMContainerRequestor.ContainerRequest(event1, null,
+                clock.getTime()));
     assignedRequests.reduces.put(mock(TaskAttemptId.class),
         mock(Container.class));
 
@@ -651,13 +667,17 @@ public class TestRMContainerAllocator {
         appAttemptId, mockJob, SystemClock.getInstance());
 
     // request to allocate two reduce priority containers
-    final String[] locations = new String[] { host };
-    allocator.sendRequest(createReq(jobId, 0, 1024, locations, false, true));
+    final String[] locations = new String[] {host};
+    allocator.sendRequest(createRequest(jobId, 0,
+            Resource.newInstance(1024, 1),
+            locations, false, true));
     allocator.scheduleAllReduces();
     allocator.makeRemoteRequest();
     nm.nodeHeartbeat(true);
     rm.drainEvents();
-    allocator.sendRequest(createReq(jobId, 1, 1024, locations, false, false));
+    allocator.sendRequest(createRequest(jobId, 1,
+            Resource.newInstance(1024, 1),
+            locations, false, false));
 
     int assignedContainer;
     for (assignedContainer = 0; assignedContainer < 1;) {
@@ -684,7 +704,7 @@ public class TestRMContainerAllocator {
     conf.set(MRJobConfig.REDUCE_NODE_LABEL_EXP, "ReduceNodes");
     ApplicationId appId = ApplicationId.newInstance(1, 1);
     ApplicationAttemptId appAttemptId =
-        ApplicationAttemptId.newInstance(appId, 1);
+            ApplicationAttemptId.newInstance(appId, 1);
     JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
     Job mockJob = mock(Job.class);
     when(mockJob.getReport()).thenReturn(
@@ -706,13 +726,16 @@ public class TestRMContainerAllocator {
 
     // create some map requests
     ContainerRequestEvent reqMapEvents;
-    reqMapEvents = createReq(jobId, 0, 1024, new String[] { "map" });
+    reqMapEvents = ContainerRequestCreator.createRequest(jobId, 0,
+            Resource.newInstance(1024, 1), new String[]{"map"});
     allocator.sendRequests(Arrays.asList(reqMapEvents));
 
     // create some reduce requests
     ContainerRequestEvent reqReduceEvents;
     reqReduceEvents =
-        createReq(jobId, 0, 2048, new String[] { "reduce" }, false, true);
+        createRequest(jobId, 0,
+                Resource.newInstance(2048, 1),
+                new String[] {"reduce"}, false, true);
     allocator.sendRequests(Arrays.asList(reqReduceEvents));
     allocator.schedule();
     // verify all of the host-specific asks were sent plus one for the
@@ -883,18 +906,21 @@ public class TestRMContainerAllocator {
 
     // create the container request
     // send MAP request
-    ContainerRequestEvent event1 = createReq(jobId, 1, 2048, new String[] {
-        "h1", "h2" }, true, false);
+    ContainerRequestEvent event1 = createRequest(jobId, 1,
+            Resource.newInstance(2048, 1),
+            new String[] {"h1", "h2"}, true, false);
     allocator.sendRequest(event1);
 
     // send REDUCE request
-    ContainerRequestEvent event2 = createReq(jobId, 2, 3000,
-        new String[] { "h1" }, false, true);
+    ContainerRequestEvent event2 = createRequest(jobId, 2,
+            Resource.newInstance(3000, 1),
+            new String[] {"h1"}, false, true);
     allocator.sendRequest(event2);
 
     // send MAP request
-    ContainerRequestEvent event3 = createReq(jobId, 3, 2048,
-        new String[] { "h3" }, false, false);
+    ContainerRequestEvent event3 = createRequest(jobId, 3,
+            Resource.newInstance(2048, 1),
+            new String[] {"h3"}, false, false);
     allocator.sendRequest(event3);
 
     // this tells the scheduler about the requests
@@ -911,7 +937,7 @@ public class TestRMContainerAllocator {
 
     assigned = allocator.schedule();
     rm.drainEvents();
-    checkAssignments(new ContainerRequestEvent[] { event1, event3 },
+    checkAssignments(new ContainerRequestEvent[] {event1, event3},
         assigned, false);
 
     // validate that no container is assigned to h1 as it doesn't have 2048
@@ -921,10 +947,10 @@ public class TestRMContainerAllocator {
     }
   }
 
-  private static class MyResourceManager extends MockRM {
+  static class MyResourceManager extends MockRM {
 
     private static long fakeClusterTimeStamp = System.currentTimeMillis();
-    
+
     public MyResourceManager(Configuration conf) {
       super(conf);
     }
@@ -955,7 +981,7 @@ public class TestRMContainerAllocator {
     protected ResourceScheduler createScheduler() {
       return new MyFifoScheduler(this.getRMContext());
     }
-    
+
     MyFifoScheduler getMyFifoScheduler() {
       return (MyFifoScheduler) scheduler;
     }
@@ -1221,7 +1247,7 @@ public class TestRMContainerAllocator {
     Assert.assertEquals(0.95f, job.getProgress(), 0.001f);
     Assert.assertEquals(0.95f, rmApp.getProgress(), 0.001f);
   }
-  
+
   @Test
   public void testUpdatedNodes() throws Exception {
     Configuration conf = new Configuration();
@@ -1251,11 +1277,13 @@ public class TestRMContainerAllocator {
     rm.drainEvents();
 
     // create the map container request
-    ContainerRequestEvent event = createReq(jobId, 1, 1024,
-        new String[] { "h1" });
+    ContainerRequestEvent event =
+            ContainerRequestCreator.createRequest(jobId, 1,
+                    Resource.newInstance(1024, 1),
+                    new String[] {"h1"});
     allocator.sendRequest(event);
     TaskAttemptId attemptId = event.getAttemptID();
-    
+
     TaskAttempt mockTaskAttempt = mock(TaskAttempt.class);
     when(mockTaskAttempt.getNodeId()).thenReturn(nm1.getNodeId());
     Task mockTask = mock(Task.class);
@@ -1279,7 +1307,7 @@ public class TestRMContainerAllocator {
     // no updated nodes reported
     Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
     Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
-    
+
     // mark nodes bad
     nm1.nodeHeartbeat(false);
     nm2.nodeHeartbeat(false);
@@ -1292,11 +1320,13 @@ public class TestRMContainerAllocator {
     // updated nodes are reported
     Assert.assertEquals(1, allocator.getJobUpdatedNodeEvents().size());
     Assert.assertEquals(1, allocator.getTaskAttemptKillEvents().size());
-    Assert.assertEquals(2, allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
-    Assert.assertEquals(attemptId, allocator.getTaskAttemptKillEvents().get(0).getTaskAttemptID());
+    Assert.assertEquals(2,
+        allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
+    Assert.assertEquals(attemptId,
+        allocator.getTaskAttemptKillEvents().get(0).getTaskAttemptID());
     allocator.getJobUpdatedNodeEvents().clear();
     allocator.getTaskAttemptKillEvents().clear();
-    
+
     assigned = allocator.schedule();
     rm.drainEvents();
     Assert.assertEquals(0, assigned.size());
@@ -1307,7 +1337,7 @@ public class TestRMContainerAllocator {
 
   @Test
   public void testBlackListedNodes() throws Exception {
-    
+
     LOG.info("Running testBlackListedNodes");
 
     Configuration conf = new Configuration();
@@ -1315,7 +1345,7 @@ public class TestRMContainerAllocator {
     conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
     conf.setInt(
         MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
-    
+
     MyResourceManager rm = new MyResourceManager(conf);
     rm.start();
 
@@ -1331,7 +1361,7 @@ public class TestRMContainerAllocator {
         .getAppAttemptId();
     rm.sendAMLaunched(appAttemptId);
     rm.drainEvents();
-    
+
     JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
     Job mockJob = mock(Job.class);
     when(mockJob.getReport()).thenReturn(
@@ -1347,18 +1377,24 @@ public class TestRMContainerAllocator {
     rm.drainEvents();
 
     // create the container request
-    ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
-        new String[] { "h1" });
+    ContainerRequestEvent event1 =
+            ContainerRequestCreator.createRequest(jobId, 1,
+                    Resource.newInstance(1024, 1),
+                    new String[] {"h1"});
     allocator.sendRequest(event1);
 
     // send 1 more request with different resource req
-    ContainerRequestEvent event2 = createReq(jobId, 2, 1024,
-        new String[] { "h2" });
+    ContainerRequestEvent event2 =
+            ContainerRequestCreator.createRequest(jobId, 2,
+                    Resource.newInstance(1024, 1),
+                    new String[] {"h2"});
     allocator.sendRequest(event2);
 
     // send another request with different resource and priority
-    ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
-        new String[] { "h3" });
+    ContainerRequestEvent event3 =
+            ContainerRequestCreator.createRequest(jobId, 3,
+                    Resource.newInstance(1024, 1),
+                    new String[] {"h3"});
     allocator.sendRequest(event3);
 
     // this tells the scheduler about the requests
@@ -1368,9 +1404,9 @@ public class TestRMContainerAllocator {
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
 
     // Send events to blacklist nodes h1 and h2
-    ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);            
+    ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
     allocator.sendFailure(f1);
-    ContainerFailedEvent f2 = createFailEvent(jobId, 1, "h2", false);            
+    ContainerFailedEvent f2 = createFailEvent(jobId, 1, "h2", false);
     allocator.sendFailure(f2);
 
     // update resources in scheduler
@@ -1392,23 +1428,23 @@ public class TestRMContainerAllocator {
     assigned = allocator.schedule();
     rm.drainEvents();
     assertBlacklistAdditionsAndRemovals(0, 0, rm);
-    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());    
+    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
 
     nodeManager3.nodeHeartbeat(true); // Node heartbeat
     rm.drainEvents();
     assigned = allocator.schedule();
     rm.drainEvents();
     assertBlacklistAdditionsAndRemovals(0, 0, rm);
-        
+
     Assert.assertTrue("No of assignments must be 3", assigned.size() == 3);
-    
+
     // validate that all containers are assigned to h3
     for (TaskAttemptContainerAssignedEvent assig : assigned) {
       Assert.assertTrue("Assigned container host not correct", "h3".equals(assig
           .getContainer().getNodeId().getHost()));
     }
   }
-  
+
   @Test
   public void testIgnoreBlacklisting() throws Exception {
     LOG.info("Running testIgnoreBlacklisting");
@@ -1448,7 +1484,7 @@ public class TestRMContainerAllocator {
 
     // Known=1, blacklisted=0, ignore should be false - assign first container
     assigned =
-        getContainerOnHost(jobId, 1, 1024, new String[] { "h1" },
+        getContainerOnHost(jobId, 1, 1024, new String[] {"h1"},
             nodeManagers[0], allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
 
@@ -1463,47 +1499,47 @@ public class TestRMContainerAllocator {
     // Because makeRemoteRequest will not be aware of it until next call
     // The current call will send blacklisted node "h1" to RM
     assigned =
-        getContainerOnHost(jobId, 2, 1024, new String[] { "h1" },
+        getContainerOnHost(jobId, 2, 1024, new String[] {"h1"},
             nodeManagers[0], allocator, 1, 0, 0, 1, rm);
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
 
     // Known=1, blacklisted=1, ignore should be true - assign 1
     assigned =
-        getContainerOnHost(jobId, 2, 1024, new String[] { "h1" },
+        getContainerOnHost(jobId, 2, 1024, new String[] {"h1"},
             nodeManagers[0], allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
 
     nodeManagers[nmNum] = registerNodeManager(nmNum++, rm);
     // Known=2, blacklisted=1, ignore should be true - assign 1 anyway.
     assigned =
-        getContainerOnHost(jobId, 3, 1024, new String[] { "h2" },
+        getContainerOnHost(jobId, 3, 1024, new String[] {"h2"},
             nodeManagers[1], allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
 
     nodeManagers[nmNum] = registerNodeManager(nmNum++, rm);
     // Known=3, blacklisted=1, ignore should be true - assign 1 anyway.
     assigned =
-        getContainerOnHost(jobId, 4, 1024, new String[] { "h3" },
+        getContainerOnHost(jobId, 4, 1024, new String[] {"h3"},
             nodeManagers[2], allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
 
     // Known=3, blacklisted=1, ignore should be true - assign 1
     assigned =
-        getContainerOnHost(jobId, 5, 1024, new String[] { "h1" },
+        getContainerOnHost(jobId, 5, 1024, new String[] {"h1"},
             nodeManagers[0], allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
 
     nodeManagers[nmNum] = registerNodeManager(nmNum++, rm);
     // Known=4, blacklisted=1, ignore should be false - assign 1 anyway
     assigned =
-        getContainerOnHost(jobId, 6, 1024, new String[] { "h4" },
+        getContainerOnHost(jobId, 6, 1024, new String[] {"h4"},
             nodeManagers[3], allocator, 0, 0, 1, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
 
     // Test blacklisting re-enabled.
     // Known=4, blacklisted=1, ignore should be false - no assignment on h1
     assigned =
-        getContainerOnHost(jobId, 7, 1024, new String[] { "h1" },
+        getContainerOnHost(jobId, 7, 1024, new String[] {"h1"},
             nodeManagers[0], allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
     // RMContainerRequestor would have created a replacement request.
@@ -1516,20 +1552,20 @@ public class TestRMContainerAllocator {
     // Known=4, blacklisted=2, ignore should be true. Should assign 0
     // container for the same reason above.
     assigned =
-        getContainerOnHost(jobId, 8, 1024, new String[] { "h1" },
+        getContainerOnHost(jobId, 8, 1024, new String[] {"h1"},
             nodeManagers[0], allocator, 1, 0, 0, 2, rm);
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
 
     // Known=4, blacklisted=2, ignore should be true. Should assign 2
     // containers.
     assigned =
-        getContainerOnHost(jobId, 8, 1024, new String[] { "h1" },
+        getContainerOnHost(jobId, 8, 1024, new String[] {"h1"},
             nodeManagers[0], allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 2", 2, assigned.size());
 
     // Known=4, blacklisted=2, ignore should be true.
     assigned =
-        getContainerOnHost(jobId, 9, 1024, new String[] { "h2" },
+        getContainerOnHost(jobId, 9, 1024, new String[] {"h2"},
             nodeManagers[1], allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
 
@@ -1540,23 +1576,23 @@ public class TestRMContainerAllocator {
     nodeManagers[nmNum] = registerNodeManager(nmNum++, rm);
     // Known=5, blacklisted=3, ignore should be true.
     assigned =
-        getContainerOnHost(jobId, 10, 1024, new String[] { "h3" },
+        getContainerOnHost(jobId, 10, 1024, new String[] {"h3"},
             nodeManagers[2], allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
-    
+
     // Assign on 5 more nodes - to re-enable blacklisting
     for (int i = 0; i < 5; i++) {
       nodeManagers[nmNum] = registerNodeManager(nmNum++, rm);
       assigned =
           getContainerOnHost(jobId, 11 + i, 1024,
-              new String[] { String.valueOf(5 + i) }, nodeManagers[4 + i],
+              new String[] {String.valueOf(5 + i)}, nodeManagers[4 + i],
               allocator, 0, 0, (i == 4 ? 3 : 0), 0, rm);
       Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
     }
 
     // Test h3 (blacklisted while ignoring blacklisting) is blacklisted.
     assigned =
-        getContainerOnHost(jobId, 20, 1024, new String[] { "h3" },
+        getContainerOnHost(jobId, 20, 1024, new String[] {"h3"},
             nodeManagers[2], allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
   }
@@ -1576,7 +1612,8 @@ public class TestRMContainerAllocator {
           int expectedAdditions2, int expectedRemovals2, MyResourceManager rm)
           throws Exception {
     ContainerRequestEvent reqEvent =
-        createReq(jobId, taskAttemptId, memory, hosts);
+            ContainerRequestCreator.createRequest(jobId, taskAttemptId,
+                    Resource.newInstance(memory, 1), hosts);
     allocator.sendRequest(reqEvent);
 
     // Send the request to the RM
@@ -1596,7 +1633,7 @@ public class TestRMContainerAllocator {
         expectedAdditions2, expectedRemovals2, rm);
     return assigned;
   }
- 
+
   @Test
   public void testBlackListedNodesWithSchedulingToThatNode() throws Exception {
     LOG.info("Running testBlackListedNodesWithSchedulingToThatNode");
@@ -1606,7 +1643,7 @@ public class TestRMContainerAllocator {
     conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
     conf.setInt(
         MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
-    
+
     MyResourceManager rm = new MyResourceManager(conf);
     rm.start();
 
@@ -1622,7 +1659,7 @@ public class TestRMContainerAllocator {
         .getAppAttemptId();
     rm.sendAMLaunched(appAttemptId);
     rm.drainEvents();
-    
+
     JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
     Job mockJob = mock(Job.class);
     when(mockJob.getReport()).thenReturn(
@@ -1638,8 +1675,10 @@ public class TestRMContainerAllocator {
 
     LOG.info("Requesting 1 Containers _1 on H1");
     // create the container request
-    ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
-        new String[] { "h1" });
+    ContainerRequestEvent event1 =
+            ContainerRequestCreator.createRequest(jobId, 1,
+                    Resource.newInstance(1024, 1),
+                    new String[] {"h1"});
     allocator.sendRequest(event1);
 
     LOG.info("RM Heartbeat (to send the container requests)");
@@ -1653,13 +1692,13 @@ public class TestRMContainerAllocator {
     // update resources in scheduler
     nodeManager1.nodeHeartbeat(true); // Node heartbeat
     rm.drainEvents();
-    
+
     LOG.info("RM Heartbeat (To process the scheduled containers)");
     assigned = allocator.schedule();
     rm.drainEvents();
     assertBlacklistAdditionsAndRemovals(0, 0, rm);
-    Assert.assertEquals("No of assignments must be 1", 1, assigned.size());    
-    
+    Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
+
     LOG.info("Failing container _1 on H1 (should blacklist the node)");
     // Send events to blacklist nodes h1 and h2
     ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
@@ -1667,8 +1706,9 @@ public class TestRMContainerAllocator {
 
     //At this stage, a request should be created for a fast fail map
     //Create a FAST_FAIL request for a previously failed map.
-    ContainerRequestEvent event1f = createReq(jobId, 1, 1024,
-        new String[] { "h1" }, true, false);
+    ContainerRequestEvent event1f = createRequest(jobId, 1,
+            Resource.newInstance(1024, 1),
+            new String[] {"h1"}, true, false);
     allocator.sendRequest(event1f);
 
     //Update the Scheduler with the new requests.
@@ -1678,24 +1718,26 @@ public class TestRMContainerAllocator {
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
 
     // send another request with different resource and priority
-    ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
-        new String[] { "h1", "h3" });
+    ContainerRequestEvent event3 =
+            ContainerRequestCreator.createRequest(jobId, 3,
+                    Resource.newInstance(1024, 1),
+                    new String[] {"h1", "h3"});
     allocator.sendRequest(event3);
-    
+
     //Allocator is aware of prio:5 container, and prio:20 (h1+h3) container.
     //RM is only aware of the prio:5 container
-    
+
     LOG.info("h1 Heartbeat (To actually schedule the containers)");
     // update resources in scheduler
     nodeManager1.nodeHeartbeat(true); // Node heartbeat
     rm.drainEvents();
-    
+
     LOG.info("RM Heartbeat (To process the scheduled containers)");
     assigned = allocator.schedule();
     rm.drainEvents();
     assertBlacklistAdditionsAndRemovals(0, 0, rm);
-    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());    
-    
+    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+
     //RMContainerAllocator gets assigned a p:5 on a blacklisted node.
 
     //Send a release for the p:5 container + another request.
@@ -1704,26 +1746,26 @@ public class TestRMContainerAllocator {
     rm.drainEvents();
     assertBlacklistAdditionsAndRemovals(0, 0, rm);
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
-    
+
     //Hearbeat from H3 to schedule on this host.
     LOG.info("h3 Heartbeat (To re-schedule the containers)");
     nodeManager3.nodeHeartbeat(true); // Node heartbeat
     rm.drainEvents();
-    
+
     LOG.info("RM Heartbeat (To process the re-scheduled containers for H3)");
     assigned = allocator.schedule();
     assertBlacklistAdditionsAndRemovals(0, 0, rm);
     rm.drainEvents();
-     
+
     // For debugging
     for (TaskAttemptContainerAssignedEvent assig : assigned) {
       LOG.info(assig.getTaskAttemptID() +
           " assgined to " + assig.getContainer().getId() +
           " with priority " + assig.getContainer().getPriority());
     }
-    
+
     Assert.assertEquals("No of assignments must be 2", 2, assigned.size());
-    
+
     // validate that all containers are assigned to h3
     for (TaskAttemptContainerAssignedEvent assig : assigned) {
       Assert.assertEquals("Assigned container " + assig.getContainer().getId()
@@ -1759,13 +1801,13 @@ public class TestRMContainerAllocator {
         assert (false);
       }
     }
-    
+
     List<ResourceRequest> lastAsk = null;
     List<ContainerId> lastRelease = null;
     List<String> lastBlacklistAdditions;
     List<String> lastBlacklistRemovals;
     Resource forceResourceLimit = null;
-    
+
     // override this to copy the objects otherwise FifoScheduler updates the
     // numContainers in same objects as kept by RMContainerAllocator
     @Override
@@ -1855,38 +1897,6 @@ public class TestRMContainerAllocator {
     }
   }
 
-  private ContainerRequestEvent createReq(JobId jobId, int taskAttemptId,
-      int memory, String[] hosts) {
-    return createReq(jobId, taskAttemptId, memory, 1, hosts, false, false);
-  }
-
-  private ContainerRequestEvent createReq(JobId jobId, int taskAttemptId,
-      int mem, String[] hosts, boolean earlierFailedAttempt, boolean reduce) {
-    return createReq(jobId, taskAttemptId, mem,
-        1, hosts, earlierFailedAttempt, reduce);
-  }
-
-  private ContainerRequestEvent createReq(JobId jobId, int taskAttemptId,
-      int memory, int vcore, String[] hosts, boolean earlierFailedAttempt,
-      boolean reduce) {
-    TaskId taskId;
-    if (reduce) {
-      taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
-    } else {
-      taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
-    }
-    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
-        taskAttemptId);
-    Resource containerNeed = Resource.newInstance(memory, vcore);
-    if (earlierFailedAttempt) {
-      return ContainerRequestEvent
-          .createContainerRequestEventForFailedContainer(attemptId,
-              containerNeed);
-    }
-    return new ContainerRequestEvent(attemptId, containerNeed, hosts,
-        new String[] { NetworkTopology.DEFAULT_RACK });
-  }
-
   private ContainerFailedEvent createFailEvent(JobId jobId, int taskAttemptId,
       String host, boolean reduce) {
     TaskId taskId;
@@ -1897,9 +1907,9 @@ public class TestRMContainerAllocator {
     }
     TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
         taskAttemptId);
-    return new ContainerFailedEvent(attemptId, host);    
+    return new ContainerFailedEvent(attemptId, host);
   }
-  
+
   private ContainerAllocatorEvent createDeallocateEvent(JobId jobId,
       int taskAttemptId, boolean reduce) {
     TaskId taskId;
@@ -1957,14 +1967,14 @@ public class TestRMContainerAllocator {
 
   // Mock RMContainerAllocator
   // Instead of talking to remote Scheduler,uses the local Scheduler
-  private static class MyContainerAllocator extends RMContainerAllocator {
-    static final List<TaskAttemptContainerAssignedEvent> events
-      = new ArrayList<TaskAttemptContainerAssignedEvent>();
-    static final List<TaskAttemptKillEvent> taskAttemptKillEvents 
-      = new ArrayList<TaskAttemptKillEvent>();
-    static final List<JobUpdatedNodesEvent> jobUpdatedNodeEvents 
-    = new ArrayList<JobUpdatedNodesEvent>();
-    static final List<JobEvent> jobEvents = new ArrayList<JobEvent>();
+  static class MyContainerAllocator extends RMContainerAllocator {
+    static final List<TaskAttemptContainerAssignedEvent> events =
+        new ArrayList<>();
+    static final List<TaskAttemptKillEvent> taskAttemptKillEvents =
+        new ArrayList<>();
+    static final List<JobUpdatedNodesEvent> jobUpdatedNodeEvents =
+        new ArrayList<>();
+    static final List<JobEvent> jobEvents = new ArrayList<>();
     private MyResourceManager rm;
     private boolean isUnregistered = false;
     private AllocateResponse allocateResponse;
@@ -2069,7 +2079,7 @@ public class TestRMContainerAllocator {
     }
 
     public void sendRequest(ContainerRequestEvent req) {
-      sendRequests(Arrays.asList(new ContainerRequestEvent[] { req }));
+      sendRequests(Arrays.asList(new ContainerRequestEvent[] {req}));
     }
 
     public void sendRequests(List<ContainerRequestEvent> reqs) {
@@ -2081,7 +2091,7 @@ public class TestRMContainerAllocator {
     public void sendFailure(ContainerFailedEvent f) {
       super.handleEvent(f);
     }
-    
+
     public void sendDeallocate(ContainerAllocatorEvent f) {
       super.handleEvent(f);
     }
@@ -2099,16 +2109,15 @@ public class TestRMContainerAllocator {
       // run the scheduler
       super.heartbeat();
 
-      List<TaskAttemptContainerAssignedEvent> result
-        = new ArrayList<TaskAttemptContainerAssignedEvent>(events);
+      List<TaskAttemptContainerAssignedEvent> result = new ArrayList<>(events);
       events.clear();
       return result;
     }
-    
+
     static List<TaskAttemptKillEvent> getTaskAttemptKillEvents() {
       return taskAttemptKillEvents;
     }
-    
+
     static List<JobUpdatedNodesEvent> getJobUpdatedNodeEvents() {
       return jobUpdatedNodeEvents;
     }
@@ -2117,12 +2126,12 @@ public class TestRMContainerAllocator {
     protected void startAllocatorThread() {
       // override to NOT start thread
     }
-    
+
     @Override
     protected boolean isApplicationMasterRegistered() {
       return super.isApplicationMasterRegistered();
     }
-    
+
     public boolean isUnregistered() {
       return isUnregistered;
     }
@@ -2164,7 +2173,7 @@ public class TestRMContainerAllocator {
     int numPendingReduces = 4;
     float maxReduceRampupLimit = 0.5f;
     float reduceSlowStart = 0.2f;
-    
+
     RMContainerAllocator allocator = mock(RMContainerAllocator.class);
     doCallRealMethod().when(allocator).scheduleReduces(anyInt(), anyInt(),
         anyInt(), anyInt(), anyInt(), anyInt(), any(Resource.class),
@@ -2174,14 +2183,14 @@ public class TestRMContainerAllocator {
 
     // Test slow-start
     allocator.scheduleReduces(
-        totalMaps, succeededMaps, 
-        scheduledMaps, scheduledReduces, 
-        assignedMaps, assignedReduces, 
-        mapResourceReqt, reduceResourceReqt, 
-        numPendingReduces, 
+        totalMaps, succeededMaps,
+        scheduledMaps, scheduledReduces,
+        assignedMaps, assignedReduces,
+        mapResourceReqt, reduceResourceReqt,
+        numPendingReduces,
         maxReduceRampupLimit, reduceSlowStart);
     verify(allocator, never()).setIsReduceStarted(true);
-    
+
     // verify slow-start still in effect when no more maps need to
     // be scheduled but some have yet to complete
     allocator.scheduleReduces(
@@ -2197,23 +2206,23 @@ public class TestRMContainerAllocator {
     succeededMaps = 3;
     doReturn(BuilderUtils.newResource(0, 0)).when(allocator).getResourceLimit();
     allocator.scheduleReduces(
-        totalMaps, succeededMaps, 
-        scheduledMaps, scheduledReduces, 
-        assignedMaps, assignedReduces, 
-        mapResourceReqt, reduceResourceReqt, 
-        numPendingReduces, 
+        totalMaps, succeededMaps,
+        scheduledMaps, scheduledReduces,
+        assignedMaps, assignedReduces,
+        mapResourceReqt, reduceResourceReqt,
+        numPendingReduces,
         maxReduceRampupLimit, reduceSlowStart);
     verify(allocator, times(1)).setIsReduceStarted(true);
-    
+
     // Test reduce ramp-up
     doReturn(BuilderUtils.newResource(100 * 1024, 100 * 1)).when(allocator)
       .getResourceLimit();
     allocator.scheduleReduces(
-        totalMaps, succeededMaps, 
-        scheduledMaps, scheduledReduces, 
-        assignedMaps, assignedReduces, 
-        mapResourceReqt, reduceResourceReqt, 
-        numPendingReduces, 
+        totalMaps, succeededMaps,
+        scheduledMaps, scheduledReduces,
+        assignedMaps, assignedReduces,
+        mapResourceReqt, reduceResourceReqt,
+        numPendingReduces,
         maxReduceRampupLimit, reduceSlowStart);
     verify(allocator).rampUpReduces(anyInt());
     verify(allocator, never()).rampDownReduces(anyInt());
@@ -2232,18 +2241,18 @@ public class TestRMContainerAllocator {
     verify(allocator).rampDownReduces(anyInt());
 
     // Test reduce ramp-down for when there are scheduled maps
-    // Since we have two scheduled Maps, rampDownReducers 
+    // Since we have two scheduled Maps, rampDownReducers
     // should be invoked twice.
     scheduledMaps = 2;
     assignedReduces = 2;
     doReturn(BuilderUtils.newResource(10 * 1024, 10 * 1)).when(allocator)
       .getResourceLimit();
     allocator.scheduleReduces(
-        totalMaps, succeededMaps, 
-        scheduledMaps, scheduledReduces, 
-        assignedMaps, assignedReduces, 
-        mapResourceReqt, reduceResourceReqt, 
-        numPendingReduces, 
+        totalMaps, succeededMaps,
+        scheduledMaps, scheduledReduces,
+        assignedMaps, assignedReduces,
+        mapResourceReqt, reduceResourceReqt,
+        numPendingReduces,
         maxReduceRampupLimit, reduceSlowStart);
     verify(allocator, times(2)).rampDownReduces(anyInt());
 
@@ -2288,7 +2297,7 @@ public class TestRMContainerAllocator {
       recalculatedReduceSchedule = true;
     }
   }
-  
+
   @Test
   public void testCompletedTasksRecalculateSchedule() throws Exception {
     LOG.info("Running testCompletedTasksRecalculateSchedule");
@@ -2400,31 +2409,33 @@ public class TestRMContainerAllocator {
     RMContainerAllocator allocator = new RMContainerAllocator(
         mock(ClientService.class), mock(AppContext.class),
         new NoopAMPreemptionPolicy());
-    
+
     TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(
         MRBuilderUtils.newTaskId(
             MRBuilderUtils.newJobId(1, 1, 1), 1, TaskType.MAP), 1);
     ApplicationId applicationId = ApplicationId.newInstance(1, 1);
-    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(
-        applicationId, 1);
-    ContainerId containerId = ContainerId.newContainerId(applicationAttemptId, 1);
+    ApplicationAttemptId applicationAttemptId =
+        ApplicationAttemptId.newInstance(applicationId, 1);
+    ContainerId containerId =
+        ContainerId.newContainerId(applicationAttemptId, 1);
     ContainerStatus status = ContainerStatus.newInstance(
         containerId, ContainerState.RUNNING, "", 0);
 
     ContainerStatus abortedStatus = ContainerStatus.newInstance(
         containerId, ContainerState.RUNNING, "",
         ContainerExitStatus.ABORTED);
-    
+
     TaskAttemptEvent event = allocator.createContainerFinishedEvent(status,
         attemptId);
     Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED,
         event.getType());
-    
+
     TaskAttemptEvent abortedEvent = allocator.createContainerFinishedEvent(
         abortedStatus, attemptId);
     Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent.getType());
-    
-    ContainerId containerId2 = ContainerId.newContainerId(applicationAttemptId, 2);
+
+    ContainerId containerId2 =
+        ContainerId.newContainerId(applicationAttemptId, 2);
     ContainerStatus status2 = ContainerStatus.newInstance(containerId2,
         ContainerState.RUNNING, "", 0);
 
@@ -2440,7 +2451,7 @@ public class TestRMContainerAllocator {
         preemptedStatus, attemptId);
     Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent2.getType());
   }
-  
+
   @Test
   public void testUnregistrationOnlyIfRegistered() throws Exception {
     Configuration conf = new Configuration();
@@ -2483,7 +2494,7 @@ public class TestRMContainerAllocator {
     mrApp.stop();
     Assert.assertTrue(allocator.isUnregistered());
   }
-  
+
  // Step-1 : AM sends allocate request for 2 ContainerRequests and 1
  // blacklisted node
   // Step-2 : 2 containers are allocated by RM.
@@ -2542,11 +2553,15 @@ public class TestRMContainerAllocator {
     // create the container request
     // send MAP request
     ContainerRequestEvent event1 =
-        createReq(jobId, 1, 1024, new String[] { "h1" });
+            ContainerRequestCreator.createRequest(jobId, 1,
+                    Resource.newInstance(1024, 1),
+                    new String[]{"h1"});
     allocator.sendRequest(event1);
 
     ContainerRequestEvent event2 =
-        createReq(jobId, 2, 2048, new String[] { "h1", "h2" });
+        ContainerRequestCreator.createRequest(jobId, 2,
+                Resource.newInstance(2048, 1),
+                new String[] {"h1", "h2"});
     allocator.sendRequest(event2);
 
     // Send events to blacklist h2
@@ -2584,7 +2599,9 @@ public class TestRMContainerAllocator {
     // RM
     // send container request
     ContainerRequestEvent event3 =
-        createReq(jobId, 3, 1000, new String[] { "h1" });
+            ContainerRequestCreator.createRequest(jobId, 3,
+                    Resource.newInstance(1000, 1),
+                    new String[]{"h1"});
     allocator.sendRequest(event3);
 
     // send deallocate request
@@ -2628,7 +2645,9 @@ public class TestRMContainerAllocator {
     allocator.sendFailure(f2);
 
     ContainerRequestEvent event4 =
-        createReq(jobId, 4, 2000, new String[] { "h1", "h2" });
+            ContainerRequestCreator.createRequest(jobId, 4,
+                    Resource.newInstance(2000, 1),
+                    new String[]{"h1", "h2"});
     allocator.sendRequest(event4);
 
     // send allocate request to 2nd RM and get resync command
@@ -2639,7 +2658,9 @@ public class TestRMContainerAllocator {
    // asks, release, blacklist addition
     // and another containerRequest(event5)
     ContainerRequestEvent event5 =
-        createReq(jobId, 5, 3000, new String[] { "h1", "h2", "h3" });
+            ContainerRequestCreator.createRequest(jobId, 5,
+                    Resource.newInstance(3000, 1),
+                    new String[]{"h1", "h2", "h3"});
     allocator.sendRequest(event5);
 
     // send all outstanding request again.
@@ -2696,9 +2717,10 @@ public class TestRMContainerAllocator {
       }
     };
 
-    ContainerRequestEvent mapRequestEvt = createReq(jobId, 0,
-        (int) (maxContainerSupported.getMemorySize() + 10),
-        maxContainerSupported.getVirtualCores(),
+    final int memory = (int) (maxContainerSupported.getMemorySize() + 10);
+    ContainerRequestEvent mapRequestEvt = createRequest(jobId, 0,
+            Resource.newInstance(memory,
+            maxContainerSupported.getVirtualCores()),
         new String[0], false, false);
     allocator.sendRequests(Arrays.asList(mapRequestEvt));
     allocator.schedule();
@@ -2734,10 +2756,11 @@ public class TestRMContainerAllocator {
       }
     };
 
-    ContainerRequestEvent reduceRequestEvt = createReq(jobId, 0,
-        (int) (maxContainerSupported.getMemorySize() + 10),
-        maxContainerSupported.getVirtualCores(),
-        new String[0], false, true);
+    final int memory = (int) (maxContainerSupported.getMemorySize() + 10);
+    ContainerRequestEvent reduceRequestEvt = createRequest(jobId, 0,
+            Resource.newInstance(memory,
+            maxContainerSupported.getVirtualCores()),
+            new String[0], false, true);
     allocator.sendRequests(Arrays.asList(reduceRequestEvt));
     // Reducer container requests are added to the pending queue upon request,
     // schedule all reducers here so that we can observe if reducer requests
@@ -2787,8 +2810,9 @@ public class TestRMContainerAllocator {
     rm1.drainEvents();
     Assert.assertEquals("Should Have 1 Job Event", 1,
         allocator.jobEvents.size());
-    JobEvent event = allocator.jobEvents.get(0); 
-    Assert.assertTrue("Should Reboot", event.getType().equals(JobEventType.JOB_AM_REBOOT));
+    JobEvent event = allocator.jobEvents.get(0);
+    Assert.assertTrue("Should Reboot",
+        event.getType().equals(JobEventType.JOB_AM_REBOOT));
   }
 
   @Test(timeout=60000)
@@ -2920,7 +2944,9 @@ public class TestRMContainerAllocator {
     // create some map requests
     ContainerRequestEvent[] reqMapEvents = new ContainerRequestEvent[MAP_COUNT];
     for (int i = 0; i < reqMapEvents.length; ++i) {
-      reqMapEvents[i] = createReq(jobId, i, 1024, new String[] { "h" + i });
+      reqMapEvents[i] = ContainerRequestCreator.createRequest(jobId, i,
+              Resource.newInstance(1024, 1),
+              new String[] {"h" + i});
     }
     allocator.sendRequests(Arrays.asList(reqMapEvents));
     // create some reduce requests
@@ -2928,7 +2954,8 @@ public class TestRMContainerAllocator {
         new ContainerRequestEvent[REDUCE_COUNT];
     for (int i = 0; i < reqReduceEvents.length; ++i) {
       reqReduceEvents[i] =
-          createReq(jobId, i, 1024, new String[] {}, false, true);
+          createRequest(jobId, i, Resource.newInstance(1024, 1),
+                  new String[] {}, false, true);
     }
     allocator.sendRequests(Arrays.asList(reqReduceEvents));
     allocator.schedule();
@@ -2975,14 +3002,17 @@ public class TestRMContainerAllocator {
     // create some map requests
     ContainerRequestEvent[] reqMapEvents = new ContainerRequestEvent[MAP_COUNT];
     for (int i = 0; i < reqMapEvents.length; ++i) {
-      reqMapEvents[i] = createReq(jobId, i, 1024, new String[] { "h" + i });
+      reqMapEvents[i] = ContainerRequestCreator.createRequest(jobId, i,
+          Resource.newInstance(1024, 1), new String[] {"h" + i});
     }
     allocator.sendRequests(Arrays.asList(reqMapEvents));
     // create some reduce requests
-    ContainerRequestEvent[] reqReduceEvents = new ContainerRequestEvent[REDUCE_COUNT];
+    ContainerRequestEvent[] reqReduceEvents =
+        new ContainerRequestEvent[REDUCE_COUNT];
     for (int i = 0; i < reqReduceEvents.length; ++i) {
-      reqReduceEvents[i] = createReq(jobId, i, 1024, new String[] {},
-          false, true);
+      reqReduceEvents[i] =
+          createRequest(jobId, i, Resource.newInstance(1024, 1),
+              new String[] {}, false, true);
     }
     allocator.sendRequests(Arrays.asList(reqReduceEvents));
     allocator.schedule();
@@ -3137,13 +3167,19 @@ public class TestRMContainerAllocator {
 
    // Request 2 maps and 1 reducer (some on nodes which are not registered).
     ContainerRequestEvent event1 =
-        createReq(jobId, 1, 1024, new String[] { "h1" });
+            ContainerRequestCreator.createRequest(jobId, 1,
+                    Resource.newInstance(1024, 1),
+                    new String[]{"h1"});
     allocator.sendRequest(event1);
     ContainerRequestEvent event2 =
-        createReq(jobId, 2, 1024, new String[] { "h2" });
+            ContainerRequestCreator.createRequest(jobId, 2,
+                    Resource.newInstance(1024, 1),
+                    new String[]{"h2"});
     allocator.sendRequest(event2);
     ContainerRequestEvent event3 =
-        createReq(jobId, 3, 1024, new String[] { "h2" }, false, true);
+            createRequest(jobId, 3,
+                    Resource.newInstance(1024, 1),
+                    new String[]{"h2"}, false, true);
     allocator.sendRequest(event3);
 
     // This will tell the scheduler about the requests but there will be no
@@ -3156,7 +3192,8 @@ public class TestRMContainerAllocator {
 
     // Request for another reducer on h3 which has not registered.
     ContainerRequestEvent event4 =
-        createReq(jobId, 4, 1024, new String[] { "h3" }, false, true);
+        createRequest(jobId, 4, Resource.newInstance(1024, 1),
+                new String[] {"h3"}, false, true);
     allocator.sendRequest(event4);
 
     allocator.schedule();
@@ -3301,13 +3338,18 @@ public class TestRMContainerAllocator {
 
    // Request 2 maps and 1 reducer (some on nodes which are not registered).
     ContainerRequestEvent event1 =
-        createReq(jobId, 1, 1024, new String[] { "h1" });
+            ContainerRequestCreator.createRequest(jobId, 1,
+                    Resource.newInstance(1024, 1),
+                    new String[]{"h1"});
     allocator.sendRequest(event1);
     ContainerRequestEvent event2 =
-        createReq(jobId, 2, 1024, new String[] { "h2" });
+            ContainerRequestCreator.createRequest(jobId, 2,
+                    Resource.newInstance(1024, 1),
+                    new String[]{"h2"});
     allocator.sendRequest(event2);
     ContainerRequestEvent event3 =
-        createReq(jobId, 3, 1024, new String[] { "h2" }, false, true);
+            createRequest(jobId, 3, Resource.newInstance(1024, 1),
+                    new String[]{"h2"}, false, true);
     allocator.sendRequest(event3);
 
     // This will tell the scheduler about the requests but there will be no
@@ -3320,7 +3362,8 @@ public class TestRMContainerAllocator {
 
     // Request for another reducer on h3 which has not registered.
     ContainerRequestEvent event4 =
-        createReq(jobId, 4, 1024, new String[] { "h3" }, false, true);
+            createRequest(jobId, 4, Resource.newInstance(1024, 1),
+                    new String[]{"h3"}, false, true);
     allocator.sendRequest(event4);
 
     allocator.schedule();
@@ -3433,13 +3476,19 @@ public class TestRMContainerAllocator {
 
    // Request 2 maps and 1 reducer (some on nodes which are not registered).
     ContainerRequestEvent event1 =
-        createReq(jobId, 1, 1024, new String[] { "h1" });
+            ContainerRequestCreator.createRequest(jobId, 1,
+                    Resource.newInstance(1024, 1),
+                    new String[]{"h1"});
     allocator.sendRequest(event1);
     ContainerRequestEvent event2 =
-        createReq(jobId, 2, 1024, new String[] { "h2" });
+            ContainerRequestCreator.createRequest(jobId, 2,
+                    Resource.newInstance(1024, 1),
+                    new String[]{"h2"});
     allocator.sendRequest(event2);
     ContainerRequestEvent event3 =
-         createReq(jobId, 3, 1024, new String[] { "h1" }, false, true);
+            createRequest(jobId, 3,
+                    Resource.newInstance(1024, 1),
+                    new String[]{"h1"}, false, true);
     allocator.sendRequest(event3);
 
     // This will tell the scheduler about the requests but there will be no
@@ -3449,7 +3498,8 @@ public class TestRMContainerAllocator {
 
     // Request for another reducer on h3 which has not registered.
     ContainerRequestEvent event4 =
-        createReq(jobId, 4, 1024, new String[] { "h3" }, false, true);
+        createRequest(jobId, 4, Resource.newInstance(1024, 1),
+                new String[] {"h3"}, false, true);
     allocator.sendRequest(event4);
 
     allocator.schedule();
@@ -3486,7 +3536,9 @@ public class TestRMContainerAllocator {
 
     // Send request for one more mapper.
     ContainerRequestEvent event5 =
-        createReq(jobId, 5, 1024, new String[] { "h1" });
+            ContainerRequestCreator.createRequest(jobId, 5,
+                    Resource.newInstance(1024, 1),
+                    new String[]{"h1"});
     allocator.sendRequest(event5);
 
     rm.getMyFifoScheduler().forceResourceLimit(Resource.newInstance(2048, 2));
@@ -3528,7 +3580,7 @@ public class TestRMContainerAllocator {
       return RegisterApplicationMasterResponse.newInstance(
           Resource.newInstance(512, 1),
           Resource.newInstance(512000, 1024),
-          Collections.<ApplicationAccessType,String>emptyMap(),
+          Collections.emptyMap(),
           ByteBuffer.wrap("fake_key".getBytes()),
           Collections.<Container>emptyList(),
           "default",

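For context: the call sites above replace the removed private createReq overloads with ContainerRequestCreator.createRequest, whose source file is not part of this excerpt. A minimal sketch of such a helper, reconstructed from the removed createReq bodies and the call sites (class placement and exact signatures are assumptions, not the committed source):

    package org.apache.hadoop.mapreduce.v2.app.rm;

    import org.apache.hadoop.mapreduce.v2.api.records.JobId;
    import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
    import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
    import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
    import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
    import org.apache.hadoop.net.NetworkTopology;
    import org.apache.hadoop.yarn.api.records.Resource;

    final class ContainerRequestCreator {

      private ContainerRequestCreator() {}

      static ContainerRequestEvent createRequest(JobId jobId,
          int taskAttemptId, Resource resource, String[] hosts) {
        return createRequest(jobId, taskAttemptId, resource, hosts,
            false, false);
      }

      static ContainerRequestEvent createRequest(JobId jobId,
          int taskAttemptId, Resource resource, String[] hosts,
          boolean earlierFailedAttempt, boolean reduce) {
        // Same logic as the removed createReq: build a MAP or REDUCE task
        // id, wrap it in a task attempt id, and emit the matching event.
        TaskId taskId;
        if (reduce) {
          taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
        } else {
          taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
        }
        TaskAttemptId attemptId =
            MRBuilderUtils.newTaskAttemptId(taskId, taskAttemptId);
        if (earlierFailedAttempt) {
          return ContainerRequestEvent
              .createContainerRequestEventForFailedContainer(attemptId,
                  resource);
        }
        return new ContainerRequestEvent(attemptId, resource, hosts,
            new String[] {NetworkTopology.DEFAULT_RACK});
      }
    }

Call sites either qualify the call (ContainerRequestCreator.createRequest) or static-import createRequest, which matches the two call styles visible in the hunks above.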
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8fa7cb6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
index 7a212e1..1da2fed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
@@ -175,16 +175,8 @@ public class UnitsConversionUtil {
    */
   public static int compare(String unitA, long valueA, String unitB,
       long valueB) {
-    if (unitA == null || unitB == null || !KNOWN_UNITS.contains(unitA)
-        || !KNOWN_UNITS.contains(unitB)) {
-      throw new IllegalArgumentException("Units cannot be null");
-    }
-    if (!KNOWN_UNITS.contains(unitA)) {
-      throw new IllegalArgumentException("Unknown unit '" + unitA + "'");
-    }
-    if (!KNOWN_UNITS.contains(unitB)) {
-      throw new IllegalArgumentException("Unknown unit '" + unitB + "'");
-    }
+    checkUnitArgument(unitA);
+    checkUnitArgument(unitB);
     if (unitA.equals(unitB)) {
       return Long.compare(valueA, valueB);
     }
@@ -218,4 +210,36 @@ public class UnitsConversionUtil {
       return tmpA.compareTo(tmpB);
     }
   }
+
+  private static void checkUnitArgument(String unit) {
+    if (unit == null) {
+      throw new IllegalArgumentException("Unit cannot be null");
+    } else if (!KNOWN_UNITS.contains(unit)) {
+      throw new IllegalArgumentException("Unknown unit '" + unit + "'");
+    }
+  }
+
+  /**
+   * Compare a unit to another unit.
+   * <br>
+   * Examples:<br>
+   * 1. 'm' (milli) is smaller than 'k' (kilo), so compareUnits("m", "k")
+   * will return -1.<br>
+   * 2. 'M' (MEGA) is greater than 'k' (kilo), so compareUnits("M", "k") will
+   * return 1.
+   *
+   * @param unitA first unit
+   * @param unitB second unit
+   * @return +1, 0 or -1 depending on whether unitA is greater than,
+   * equal to, or smaller than unitB.
+   */
+  public static int compareUnits(String unitA, String unitB) {
+    checkUnitArgument(unitA);
+    checkUnitArgument(unitB);
+    int unitAPos = SORTED_UNITS.indexOf(unitA);
+    int unitBPos = SORTED_UNITS.indexOf(unitB);
+
+    return Integer.compare(unitAPos, unitBPos);
+  }
 }

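A short usage sketch of the two comparison entry points; this assumes "m", "k" and "G" are among KNOWN_UNITS, as the javadoc examples above suggest:

    int rel  = UnitsConversionUtil.compareUnits("m", "k");   // -1: milli < kilo
    int same = UnitsConversionUtil.compareUnits("G", "G");   //  0: same unit
    // compare() additionally normalizes the values across the two units:
    int cmp  = UnitsConversionUtil.compare("k", 1L, "m", 1000000L); // 0: 1k == 1,000,000m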
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8fa7cb6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java
new file mode 100644
index 0000000..98a8a00
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.resourcetypes;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Contains helper methods to create Resource and ResourceInformation objects.
+ * ResourceInformation can be created from a resource name
+ * and a resource descriptor as well that comprises amount and unit.
+ */
+public final class ResourceTypesTestHelper {
+
+  private static final Pattern RESOURCE_VALUE_AND_UNIT_PATTERN =
+      Pattern.compile("(\\d+)([A-Za-z]*)");
+
+  private ResourceTypesTestHelper() {}
+
+  private static final RecordFactory RECORD_FACTORY = RecordFactoryProvider
+          .getRecordFactory(null);
+
+  private static final class ResourceValueAndUnit {
+    private final Long value;
+    private final String unit;
+
+    private ResourceValueAndUnit(Long value, String unit) {
+      this.value = value;
+      this.unit = unit;
+    }
+  }
+
+  public static Resource newResource(long memory, int vCores, Map<String,
+          String> customResources) {
+    Resource resource = RECORD_FACTORY.newRecordInstance(Resource.class);
+    resource.setMemorySize(memory);
+    resource.setVirtualCores(vCores);
+
+    for (Map.Entry<String, String> customResource :
+            customResources.entrySet()) {
+      String resourceName = customResource.getKey();
+      ResourceInformation resourceInformation =
+              createResourceInformation(resourceName,
+                      customResource.getValue());
+      resource.setResourceInformation(resourceName, resourceInformation);
+    }
+    return resource;
+  }
+
+  public static ResourceInformation createResourceInformation(String
+          resourceName, String descriptor) {
+    ResourceValueAndUnit resourceValueAndUnit =
+            getResourceValueAndUnit(descriptor);
+    return ResourceInformation
+            .newInstance(resourceName, resourceValueAndUnit.unit,
+                    resourceValueAndUnit.value);
+  }
+
+  private static ResourceValueAndUnit getResourceValueAndUnit(String val) {
+    Matcher matcher = RESOURCE_VALUE_AND_UNIT_PATTERN.matcher(val);
+    if (!matcher.find()) {
+      throw new RuntimeException("Invalid pattern of resource descriptor: " +
+              val);
+    } else if (matcher.groupCount() != 2) {
+      throw new RuntimeException("Capturing group count in string " +
+              val + " is not 2!");
+    }
+    long value = Long.parseLong(matcher.group(1));
+
+    return new ResourceValueAndUnit(value, matcher.group(2));
+  }
+
+}

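A usage sketch for the new helper (ImmutableMap as used elsewhere in this series; the resource name "res_1" is only an example):

    // 1024 memory, 2 vcores, plus a custom resource parsed from a
    // "<value><unit>" descriptor: "5G" -> value 5, unit "G".
    Resource res = ResourceTypesTestHelper.newResource(1024, 2,
        ImmutableMap.<String, String>builder().put("res_1", "5G").build());

    // The descriptor parsing is also usable on its own:
    ResourceInformation ri =
        ResourceTypesTestHelper.createResourceInformation("res_1", "512M");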
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8fa7cb6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
index 0de834c..e06b55e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
@@ -183,7 +183,7 @@ public class BuilderUtils {
   public static NodeId newNodeId(String host, int port) {
     return NodeId.newInstance(host, port);
   }
-  
+
   public static NodeReport newNodeReport(NodeId nodeId, NodeState nodeState,
       String httpAddress, String rackName, Resource used, Resource capability,
       int numContainers, String healthReport, long lastHealthReportTime) {
@@ -422,7 +422,7 @@ public class BuilderUtils {
     report.setPriority(priority);
     return report;
   }
-  
+
   public static ApplicationSubmissionContext newApplicationSubmissionContext(
       ApplicationId applicationId, String applicationName, String queue,
       Priority priority, ContainerLaunchContext amContainer,
@@ -477,6 +477,10 @@ public class BuilderUtils {
     return resource;
   }
 
+  public static Resource newEmptyResource() {
+    return recordFactory.newRecordInstance(Resource.class);
+  }
+
   public static URL newURL(String scheme, String host, int port, String file) {
     URL url = recordFactory.newRecordInstance(URL.class);
     url.setScheme(scheme);

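The new newEmptyResource() presumably exists so callers can start from a zero-valued record-factory instance and set fields explicitly; a hypothetical usage:

    Resource empty = BuilderUtils.newEmptyResource();
    empty.setMemorySize(2048);   // fields are filled in one by one
    empty.setVirtualCores(2);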
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8fa7cb6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
index c0d7d86..9b3c20a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
+import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -283,24 +284,10 @@ public class SchedulerUtils {
   private static void validateResourceRequest(ResourceRequest resReq,
       Resource maximumResource, QueueInfo queueInfo, RMContext rmContext)
       throws InvalidResourceRequestException {
-    Resource requestedResource = resReq.getCapability();
-    for (int i = 0; i < ResourceUtils.getNumberOfKnownResourceTypes(); i++) {
-      ResourceInformation reqRI = requestedResource.getResourceInformation(i);
-      ResourceInformation maxRI = maximumResource.getResourceInformation(i);
-      if (reqRI.getValue() < 0 || reqRI.getValue() > maxRI.getValue()) {
-        throw new InvalidResourceRequestException(
-            "Invalid resource request, requested resource type=[" + reqRI
-                .getName()
-                + "] < 0 or greater than maximum allowed allocation. Requested "
-                + "resource=" + requestedResource
-                + ", maximum allowed allocation=" + maximumResource
-                + ", please note that maximum allowed allocation is calculated "
-                + "by scheduler based on maximum resource of registered "
-                + "NodeManagers, which might be less than configured "
-                + "maximum allocation=" + ResourceUtils
-                .getResourceTypesMaximumAllocation());
-      }
-    }
+    final Resource requestedResource = resReq.getCapability();
+    checkResourceRequestAgainstAvailableResource(requestedResource,
+        maximumResource);
+
     String labelExp = resReq.getNodeLabelExpression();
     // we don't allow specify label expression other than resourceName=ANY now
     if (!ResourceRequest.ANY.equals(resReq.getResourceName())
@@ -338,6 +325,78 @@ public class SchedulerUtils {
     }
   }
 
+  @Private
+  @VisibleForTesting
+  static void checkResourceRequestAgainstAvailableResource(Resource reqResource,
+      Resource availableResource) throws InvalidResourceRequestException {
+    for (int i = 0; i < ResourceUtils.getNumberOfKnownResourceTypes(); i++) {
+      final ResourceInformation requestedRI =
+          reqResource.getResourceInformation(i);
+      final String reqResourceName = requestedRI.getName();
+
+      if (requestedRI.getValue() < 0) {
+        throwInvalidResourceException(reqResource, availableResource,
+            reqResourceName);
+      }
+
+      final ResourceInformation availableRI =
+          availableResource.getResourceInformation(reqResourceName);
+
+      long requestedResourceValue = requestedRI.getValue();
+      long availableResourceValue = availableRI.getValue();
+      int unitsRelation = UnitsConversionUtil
+          .compareUnits(requestedRI.getUnits(), availableRI.getUnits());
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Requested resource information: " + requestedRI);
+        LOG.debug("Available resource information: " + availableRI);
+        LOG.debug("Relation of units: " + unitsRelation);
+      }
+
+      // requested resource unit is less than available resource unit
+      // e.g. requestedUnit: "m", availableUnit: "K")
+      if (unitsRelation < 0) {
+        availableResourceValue =
+            UnitsConversionUtil.convert(availableRI.getUnits(),
+                requestedRI.getUnits(), availableRI.getValue());
+
+        // requested resource unit is greater than available resource unit
+        // e.g. requestedUnit: "G", availableUnit: "M")
+      } else if (unitsRelation > 0) {
+        requestedResourceValue =
+            UnitsConversionUtil.convert(requestedRI.getUnits(),
+                availableRI.getUnits(), requestedRI.getValue());
+      }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Requested resource value after conversion: " +
+                requestedResourceValue);
+        LOG.info("Available resource value after conversion: " +
+                availableResourceValue);
+      }
+
+      if (requestedResourceValue > availableResourceValue) {
+        throwInvalidResourceException(reqResource, availableResource,
+            reqResourceName);
+      }
+    }
+  }
+
+  private static void throwInvalidResourceException(Resource reqResource,
+      Resource availableResource, String reqResourceName)
+      throws InvalidResourceRequestException {
+    throw new InvalidResourceRequestException(
+        "Invalid resource request, requested resource type=[" + reqResourceName
+            + "] < 0 or greater than maximum allowed allocation. Requested "
+            + "resource=" + reqResource + ", maximum allowed allocation="
+            + availableResource
+            + ", please note that maximum allowed allocation is calculated "
+            + "by scheduler based on maximum resource of registered "
+            + "NodeManagers, which might be less than configured "
+            + "maximum allocation="
+            + ResourceUtils.getResourceTypesMaximumAllocation());
+  }
+
   private static void checkQueueLabelInLabelManager(String labelExpression,
       RMContext rmContext) throws InvalidLabelResourceRequestException {
     // check node label manager contains this label

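Condensed, the per-resource-type rule in checkResourceRequestAgainstAvailableResource is: reject negative values, normalize both sides to the smaller of the two units, then compare plain values. A standalone sketch of that rule (not the committed method):

    static boolean fitsWithin(ResourceInformation requested,
        ResourceInformation available) {
      long reqValue = requested.getValue();
      long availValue = available.getValue();
      int unitsRelation = UnitsConversionUtil
          .compareUnits(requested.getUnits(), available.getUnits());
      if (unitsRelation < 0) {
        // requested unit is the smaller one: bring the available side down
        availValue = UnitsConversionUtil.convert(available.getUnits(),
            requested.getUnits(), available.getValue());
      } else if (unitsRelation > 0) {
        // available unit is the smaller one: bring the requested side down
        reqValue = UnitsConversionUtil.convert(requested.getUnits(),
            available.getUnits(), requested.getValue());
      }
      return reqValue >= 0 && reqValue <= availValue;
    }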



[11/50] [abbrv] hadoop git commit: YARN-8202. DefaultAMSProcessor should properly check units of requested custom resource types against minimum/maximum allocation (snemeth via rkanter)

Posted by xy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8fa7cb6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
index 90e4be8..9696741 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
@@ -22,9 +22,13 @@ import static java.lang.Thread.sleep;
 import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB;
 import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES;
 
+
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
@@ -61,6 +65,7 @@ import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
+import org.apache.hadoop.yarn.resourcetypes.ResourceTypesTestHelper;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.TestResourceProfiles;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -75,6 +80,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair
+        .FairSchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
@@ -365,7 +373,7 @@ public class TestApplicationMasterService {
       am2.addContainerToBeReleased(cId);
       try {
         am2.schedule();
-        Assert.fail("Exception was expected!!");
+        fail("Exception was expected!!");
       } catch (InvalidContainerReleaseException e) {
         StringBuilder sb = new StringBuilder("Cannot release container : ");
         sb.append(cId.toString());
@@ -460,7 +468,7 @@ public class TestApplicationMasterService {
               FinalApplicationStatus.FAILED, "", "");
       try {
         am1.unregisterAppAttempt(req, false);
-        Assert.fail("ApplicationMasterNotRegisteredException should be thrown");
+        fail("ApplicationMasterNotRegisteredException should be thrown");
       } catch (ApplicationMasterNotRegisteredException e) {
         Assert.assertNotNull(e);
         Assert.assertNotNull(e.getMessage());
@@ -468,7 +476,7 @@ public class TestApplicationMasterService {
             "Application Master is trying to unregister before registering for:"
         ));
       } catch (Exception e) {
-        Assert.fail("ApplicationMasterNotRegisteredException should be thrown");
+        fail("ApplicationMasterNotRegisteredException should be thrown");
       }
 
       am1.registerAppAttempt();
@@ -627,9 +635,7 @@ public class TestApplicationMasterService {
       Assert.assertEquals("UPDATE_OUTSTANDING_ERROR",
           response.getUpdateErrors().get(0).getReason());
     } finally {
-      if (rm != null) {
-        rm.close();
-      }
+      rm.close();
     }
   }
 
@@ -709,34 +715,48 @@ public class TestApplicationMasterService {
 
     ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
 
-    CapacitySchedulerConfiguration csconf =
-        new CapacitySchedulerConfiguration();
-    csconf.setResourceComparator(DominantResourceCalculator.class);
+    final YarnConfiguration yarnConf;
+    if (schedulerCls.getCanonicalName()
+        .equals(CapacityScheduler.class.getCanonicalName())) {
+      CapacitySchedulerConfiguration csConf =
+          new CapacitySchedulerConfiguration();
+      csConf.setResourceComparator(DominantResourceCalculator.class);
+      yarnConf = new YarnConfiguration(csConf);
+    } else if (schedulerCls.getCanonicalName()
+        .equals(FairScheduler.class.getCanonicalName())) {
+      FairSchedulerConfiguration fsConf = new FairSchedulerConfiguration();
+      yarnConf = new YarnConfiguration(fsConf);
+    } else {
+      throw new IllegalStateException(
+          "Scheduler class is of wrong type: " + schedulerCls);
+    }
 
-    YarnConfiguration conf = new YarnConfiguration(csconf);
     // Don't reset resource types since we have already configured resource
     // types
-    conf.setBoolean(TestResourceProfiles.TEST_CONF_RESET_RESOURCE_TYPES, false);
-    conf.setClass(YarnConfiguration.RM_SCHEDULER, schedulerCls,
+    yarnConf.setBoolean(TestResourceProfiles.TEST_CONF_RESET_RESOURCE_TYPES,
+        false);
+    yarnConf.setClass(YarnConfiguration.RM_SCHEDULER, schedulerCls,
         ResourceScheduler.class);
-    conf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, false);
+    yarnConf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, false);
 
-    MockRM rm = new MockRM(conf);
+    MockRM rm = new MockRM(yarnConf);
     rm.start();
 
     MockNM nm1 = rm.registerNode("199.99.99.1:1234", TestUtils
         .createResource(DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
             DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, null));
 
-    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    RMApp app1 = rm.submitApp(GB, "app", "user", null, "default");
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
 
     // Now request resource, memory > allowed
     boolean exception = false;
     try {
-      am1.allocate(Arrays.asList(ResourceRequest.newBuilder().capability(
-          Resource.newInstance(9 * GB, 1)).numContainers(1).resourceName("*")
-          .build()), null);
+      am1.allocate(Collections.singletonList(ResourceRequest.newBuilder()
+              .capability(Resource.newInstance(9 * GB, 1))
+              .numContainers(1)
+              .resourceName("*")
+              .build()), null);
     } catch (InvalidResourceRequestException e) {
       exception = true;
     }
@@ -744,10 +764,12 @@ public class TestApplicationMasterService {
 
     exception = false;
     try {
-      // Now request resource, vcore > allowed
-      am1.allocate(Arrays.asList(ResourceRequest.newBuilder().capability(
-          Resource.newInstance(8 * GB, 18)).numContainers(1).resourceName("*")
-          .build()), null);
+      // Now request resource, vcores > allowed
+      am1.allocate(Collections.singletonList(ResourceRequest.newBuilder()
+              .capability(Resource.newInstance(8 * GB, 18))
+              .numContainers(1)
+              .resourceName("*")
+              .build()), null);
     } catch (InvalidResourceRequestException e) {
       exception = true;
     }
@@ -756,6 +778,73 @@ public class TestApplicationMasterService {
     rm.close();
   }
 
+  @Test
+  public void testValidateRequestCapacityAgainstMinMaxAllocationWithDifferentUnits()
+      throws Exception {
+
+    // Initialize resource map for 2 types.
+    Map<String, ResourceInformation> riMap = new HashMap<>();
+
+    // Initialize mandatory resources
+    ResourceInformation memory =
+        ResourceInformation.newInstance(ResourceInformation.MEMORY_MB.getName(),
+            ResourceInformation.MEMORY_MB.getUnits(),
+            YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
+            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
+    ResourceInformation vcores =
+        ResourceInformation.newInstance(ResourceInformation.VCORES.getName(),
+            ResourceInformation.VCORES.getUnits(),
+            YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
+            DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
+    ResourceInformation res1 =
+        ResourceInformation.newInstance("res_1", "G", 0, 4);
+    riMap.put(ResourceInformation.MEMORY_URI, memory);
+    riMap.put(ResourceInformation.VCORES_URI, vcores);
+    riMap.put("res_1", res1);
+
+    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
+
+    FairSchedulerConfiguration fsConf =
+            new FairSchedulerConfiguration();
+
+    YarnConfiguration yarnConf = new YarnConfiguration(fsConf);
+    // Don't reset resource types since we have already configured resource
+    // types
+    yarnConf.setBoolean(TestResourceProfiles.TEST_CONF_RESET_RESOURCE_TYPES,
+        false);
+    yarnConf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
+        ResourceScheduler.class);
+    yarnConf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, false);
+
+    MockRM rm = new MockRM(yarnConf);
+    rm.start();
+
+    MockNM nm1 = rm.registerNode("199.99.99.1:1234",
+        ResourceTypesTestHelper.newResource(
+            DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
+            DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
+            ImmutableMap.<String, String> builder()
+                .put("res_1", "5G").build()));
+
+    RMApp app1 = rm.submitApp(GB, "app", "user", null, "default");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
+
+    // Now request res_1, 500M < 5G so it should be allowed
+    try {
+      am1.allocate(Collections.singletonList(ResourceRequest.newBuilder()
+          .capability(ResourceTypesTestHelper.newResource(4 * GB, 1,
+              ImmutableMap.<String, String> builder()
+                  .put("res_1", "500M")
+                      .build()))
+          .numContainers(1).resourceName("*").build()), null);
+    } catch (InvalidResourceRequestException e) {
+      fail(
+          "Allocate request should be accepted but exception was thrown: " + e);
+    }
+
+    rm.close();
+  }
+
   @Test(timeout = 300000)
   public void testValidateRequestCapacityAgainstMinMaxAllocationFor3rdResourceTypes()
       throws Exception {
@@ -774,11 +863,11 @@ public class TestApplicationMasterService {
         ResourceInformation.VCORES.getUnits(),
         YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
         DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
-    ResourceInformation res_1 = ResourceInformation.newInstance("res_1",
+    ResourceInformation res1 = ResourceInformation.newInstance("res_1",
         ResourceInformation.VCORES.getUnits(), 0, 4);
     riMap.put(ResourceInformation.MEMORY_URI, memory);
     riMap.put(ResourceInformation.VCORES_URI, vcores);
-    riMap.put("res_1", res_1);
+    riMap.put("res_1", res1);
 
     ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
 
@@ -786,15 +875,16 @@ public class TestApplicationMasterService {
         new CapacitySchedulerConfiguration();
     csconf.setResourceComparator(DominantResourceCalculator.class);
 
-    YarnConfiguration conf = new YarnConfiguration(csconf);
+    YarnConfiguration yarnConf = new YarnConfiguration(csconf);
     // Don't reset resource types since we have already configured resource
     // types
-    conf.setBoolean(TestResourceProfiles.TEST_CONF_RESET_RESOURCE_TYPES, false);
-    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+    yarnConf.setBoolean(TestResourceProfiles.TEST_CONF_RESET_RESOURCE_TYPES,
+        false);
+    yarnConf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
         ResourceScheduler.class);
-    conf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, false);
+    yarnConf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, false);
 
-    MockRM rm = new MockRM(conf);
+    MockRM rm = new MockRM(yarnConf);
     rm.start();
 
     CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
@@ -805,18 +895,21 @@ public class TestApplicationMasterService {
             DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
             ImmutableMap.of("res_1", 4)));
 
-    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    RMApp app1 = rm.submitApp(GB, "app", "user", null, "default");
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
 
-    Assert.assertEquals(Resource.newInstance(1 * GB, 1),
+    Assert.assertEquals(Resource.newInstance(GB, 1),
         leafQueue.getUsedResources());
 
     // Now request resource, memory > allowed
     boolean exception = false;
     try {
-      am1.allocate(Arrays.asList(ResourceRequest.newBuilder().capability(
-          TestUtils.createResource(9 * GB, 1, ImmutableMap.of("res_1", 1)))
-          .numContainers(1).resourceName("*").build()), null);
+      am1.allocate(Collections.singletonList(ResourceRequest.newBuilder()
+              .capability(TestUtils.createResource(9 * GB, 1,
+                      ImmutableMap.of("res_1", 1)))
+              .numContainers(1)
+              .resourceName("*")
+              .build()), null);
     } catch (InvalidResourceRequestException e) {
       exception = true;
     }
@@ -824,11 +917,13 @@ public class TestApplicationMasterService {
 
     exception = false;
     try {
-      // Now request resource, vcore > allowed
-      am1.allocate(Arrays.asList(ResourceRequest.newBuilder().capability(
-          TestUtils.createResource(8 * GB, 18, ImmutableMap.of("res_1", 1)))
-          .numContainers(1).resourceName("*")
-          .build()), null);
+      // Now request resource, vcores > allowed
+      am1.allocate(Collections.singletonList(ResourceRequest.newBuilder()
+          .capability(
+              TestUtils.createResource(8 * GB, 18, ImmutableMap.of("res_1", 1)))
+              .numContainers(1)
+              .resourceName("*")
+              .build()), null);
     } catch (InvalidResourceRequestException e) {
       exception = true;
     }
@@ -837,10 +932,12 @@ public class TestApplicationMasterService {
     exception = false;
     try {
       // Now request resource, res_1 > allowed
-      am1.allocate(Arrays.asList(ResourceRequest.newBuilder().capability(
-          TestUtils.createResource(8 * GB, 1, ImmutableMap.of("res_1", 100)))
-          .numContainers(1).resourceName("*")
-          .build()), null);
+      am1.allocate(Collections.singletonList(ResourceRequest.newBuilder()
+              .capability(TestUtils.createResource(8 * GB, 1,
+                      ImmutableMap.of("res_1", 100)))
+              .numContainers(1)
+              .resourceName("*")
+              .build()), null);
     } catch (InvalidResourceRequestException e) {
       exception = true;
     }
@@ -856,7 +953,7 @@ public class TestApplicationMasterService {
       rmContainer.handle(
           new RMContainerEvent(containerId, RMContainerEventType.LAUNCHED));
     } else {
-      Assert.fail("Cannot find RMContainer");
+      fail("Cannot find RMContainer");
     }
   }
 }

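Worked through the unit-aware check, the res_1 case in testValidateRequestCapacityAgainstMinMaxAllocationWithDifferentUnits resolves as follows: the maximum derived from the registered NodeManager is 5G and the request asks for 500M; compareUnits("M", "G") is negative, so the 5G maximum is converted down to 5000M, and since 500 <= 5000 the request passes without an InvalidResourceRequestException.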
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8fa7cb6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
index cb1f794..15cfdb0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
@@ -26,7 +26,9 @@ import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+import java.io.ByteArrayInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedAction;
 import java.util.Arrays;
@@ -35,6 +37,7 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
+import com.google.common.collect.ImmutableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,6 +45,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.LocalConfigurationProvider;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
@@ -63,8 +67,10 @@ import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.InvalidLabelResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceBlacklistRequestException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.apache.hadoop.yarn.resourcetypes.ResourceTypesTestHelper;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MockRMWithAMS;
@@ -83,20 +89,79 @@ import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
+import org.junit.rules.ExpectedException;
 
 public class TestSchedulerUtils {
 
   private static final Log LOG = LogFactory.getLog(TestSchedulerUtils.class);
-  
+  private static Resource configuredMaxAllocation;
+
+  private static class CustomResourceTypesConfigurationProvider
+          extends LocalConfigurationProvider {
+
+    @Override
+    public InputStream getConfigurationInputStream(Configuration bootstrapConf,
+            String name) throws YarnException, IOException {
+      if (YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE.equals(name)) {
+        return new ByteArrayInputStream(
+                ("<configuration>\n" +
+                        " <property>\n" +
+                        "   <name>yarn.resource-types</name>\n" +
+                        "   <value>custom-resource-1," +
+                        "custom-resource-2,custom-resource-3</value>\n" +
+                        " </property>\n" +
+                        " <property>\n" +
+                        "   <name>yarn.resource-types" +
+                        ".custom-resource-1.units</name>\n" +
+                        "   <value>G</value>\n" +
+                        " </property>\n" +
+                        " <property>\n" +
+                        "   <name>yarn.resource-types" +
+                        ".custom-resource-2.units</name>\n" +
+                        "   <value>G</value>\n" +
+                        " </property>\n" +
+                        "</configuration>\n").getBytes());
+      } else {
+        return super.getConfigurationInputStream(bootstrapConf, name);
+      }
+    }
+  }
   private RMContext rmContext = getMockRMContext();
+
   private static YarnConfiguration conf = new YarnConfiguration();
 
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  private void initResourceTypes() {
+    Configuration yarnConf = new Configuration();
+    yarnConf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
+        CustomResourceTypesConfigurationProvider.class.getName());
+    ResourceUtils.resetResourceTypes(yarnConf);
+  }
+
+  @Before
+  public void setUp() {
+    initResourceTypes();
+    //this needs to be initialized after initResourceTypes is called
+    configuredMaxAllocation = Resource.newInstance(8192, 4,
+            ImmutableMap.<String,
+                    Long>builder()
+                    .put("custom-resource-1", Long.MAX_VALUE)
+                    .put("custom-resource-2", Long.MAX_VALUE)
+                    .put("custom-resource-3", Long.MAX_VALUE)
+                    .build());
+  }
+
   @Test (timeout = 30000)
   public void testNormalizeRequest() {
     ResourceCalculator resourceCalculator = new DefaultResourceCalculator();
@@ -150,16 +215,18 @@ public class TestSchedulerUtils {
     // multiple of minMemory > maxMemory, then reduce to maxMemory
     SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource,
         maxResource);
-    assertEquals(maxResource.getMemorySize(), ask.getCapability().getMemorySize());
+    assertEquals(maxResource.getMemorySize(),
+        ask.getCapability().getMemorySize());
 
     // ask is more than max
     maxResource = Resources.createResource(maxMemory, 0);
     ask.setCapability(Resources.createResource(maxMemory + 100));
     SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource,
         maxResource);
-    assertEquals(maxResource.getMemorySize(), ask.getCapability().getMemorySize());
+    assertEquals(maxResource.getMemorySize(),
+        ask.getCapability().getMemorySize());
   }
-  
+
   @Test (timeout = 30000)
   public void testNormalizeRequestWithDominantResourceCalculator() {
     ResourceCalculator resourceCalculator = new DominantResourceCalculator();
@@ -201,10 +268,11 @@ public class TestSchedulerUtils {
     Set<String> queueAccessibleNodeLabels = Sets.newHashSet();
     QueueInfo queueInfo = mock(QueueInfo.class);
     when(queueInfo.getQueueName()).thenReturn("queue");
-    when(queueInfo.getAccessibleNodeLabels()).thenReturn(queueAccessibleNodeLabels);
+    when(queueInfo.getAccessibleNodeLabels())
+        .thenReturn(queueAccessibleNodeLabels);
     when(scheduler.getQueueInfo(any(String.class), anyBoolean(), anyBoolean()))
         .thenReturn(queueInfo);
-    
+
     Resource maxResource = Resources.createResource(
         YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
         YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
@@ -363,7 +431,7 @@ public class TestSchedulerUtils {
       rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
           Arrays.asList("x"));
     }
-    Assert.assertTrue("InvalidLabelResourceRequestException excpeted",
+    Assert.assertTrue("InvalidLabelResourceRequestException expected",
         invalidlabelexception);
     // queue is "*", always succeeded
     try {
@@ -610,11 +678,9 @@ public class TestSchedulerUtils {
 
     // more than max vcores
     try {
-      Resource resource =
-          Resources
-              .createResource(
-                  YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
-                  YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES + 1);
+      Resource resource = Resources.createResource(
+          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
+          YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES + 1);
       ResourceRequest resReq =
           BuilderUtils.newResourceRequest(mock(Priority.class),
               ResourceRequest.ANY, resource, 1);
@@ -648,10 +714,10 @@ public class TestSchedulerUtils {
     waitForLaunchedState(attempt);
 
     // Create a client to the RM.
-    final Configuration conf = rm.getConfig();
-    final YarnRPC rpc = YarnRPC.create(conf);
+    final Configuration yarnConf = rm.getConfig();
+    final YarnRPC rpc = YarnRPC.create(yarnConf);
 
-    UserGroupInformation currentUser = 
+    UserGroupInformation currentUser =
         UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
     Credentials credentials = containerManager.getContainerCredentials();
     final InetSocketAddress rmBindAddress =
@@ -665,7 +731,7 @@ public class TestSchedulerUtils {
           @Override
           public ApplicationMasterProtocol run() {
             return (ApplicationMasterProtocol) rpc.getProxy(
-              ApplicationMasterProtocol.class, rmBindAddress, conf);
+              ApplicationMasterProtocol.class, rmBindAddress, yarnConf);
           }
         });
 
@@ -775,6 +841,127 @@ public class TestSchedulerUtils {
     }
   }
 
+  @Test
+  public void testCustomResourceRequestedUnitIsSmallerThanAvailableUnit()
+      throws InvalidResourceRequestException {
+    Resource requestedResource =
+        ResourceTypesTestHelper.newResource(1, 1,
+                ImmutableMap.of("custom-resource-1", "11"));
+
+    Resource availableResource =
+        ResourceTypesTestHelper.newResource(1, 1,
+                ImmutableMap.of("custom-resource-1", "0G"));
+
+    exception.expect(InvalidResourceRequestException.class);
+    exception.expectMessage(InvalidResourceRequestExceptionMessageGenerator
+        .create().withRequestedResourceType("custom-resource-1")
+        .withRequestedResource(requestedResource)
+        .withAvailableAllocation(availableResource)
+        .withMaxAllocation(configuredMaxAllocation).build());
+
+    SchedulerUtils.checkResourceRequestAgainstAvailableResource(
+        requestedResource, availableResource);
+  }
+
+  @Test
+  public void testCustomResourceRequestedUnitIsSmallerThanAvailableUnit2() {
+    Resource requestedResource =
+        ResourceTypesTestHelper.newResource(1, 1,
+                ImmutableMap.of("custom-resource-1", "11"));
+
+    Resource availableResource =
+        ResourceTypesTestHelper.newResource(1, 1,
+                ImmutableMap.of("custom-resource-1", "1G"));
+
+    try {
+      SchedulerUtils.checkResourceRequestAgainstAvailableResource(
+          requestedResource, availableResource);
+    } catch (InvalidResourceRequestException e) {
+      fail(String.format(
+          "Resource request should be accepted. Requested: %s, available: %s",
+          requestedResource, availableResource));
+    }
+  }
+
+  @Test
+  public void testCustomResourceRequestedUnitIsGreaterThanAvailableUnit()
+      throws InvalidResourceRequestException {
+    Resource requestedResource =
+        ResourceTypesTestHelper.newResource(1, 1,
+                ImmutableMap.of("custom-resource-1", "1M"));
+
+    Resource availableResource = ResourceTypesTestHelper.newResource(1, 1,
+        ImmutableMap.<String, String> builder().put("custom-resource-1", "120k")
+            .build());
+
+    exception.expect(InvalidResourceRequestException.class);
+    exception.expectMessage(InvalidResourceRequestExceptionMessageGenerator
+        .create().withRequestedResourceType("custom-resource-1")
+        .withRequestedResource(requestedResource)
+        .withAvailableAllocation(availableResource)
+        .withMaxAllocation(configuredMaxAllocation).build());
+    SchedulerUtils.checkResourceRequestAgainstAvailableResource(
+        requestedResource, availableResource);
+  }
+
+  @Test
+  public void testCustomResourceRequestedUnitIsGreaterThanAvailableUnit2() {
+    Resource requestedResource = ResourceTypesTestHelper.newResource(1, 1,
+        ImmutableMap.<String, String> builder().put("custom-resource-1", "11M")
+            .build());
+
+    Resource availableResource =
+        ResourceTypesTestHelper.newResource(1, 1,
+                ImmutableMap.of("custom-resource-1", "1G"));
+
+    try {
+      SchedulerUtils.checkResourceRequestAgainstAvailableResource(
+          requestedResource, availableResource);
+    } catch (InvalidResourceRequestException e) {
+      fail(String.format(
+          "Resource request should be accepted. Requested: %s, available: %s",
+          requestedResource, availableResource));
+    }
+  }
+
+  @Test
+  public void testCustomResourceRequestedUnitIsSameAsAvailableUnit() {
+    Resource requestedResource = ResourceTypesTestHelper.newResource(1, 1,
+        ImmutableMap.of("custom-resource-1", "11M"));
+
+    Resource availableResource = ResourceTypesTestHelper.newResource(1, 1,
+        ImmutableMap.of("custom-resource-1", "100M"));
+
+    try {
+      SchedulerUtils.checkResourceRequestAgainstAvailableResource(
+          requestedResource, availableResource);
+    } catch (InvalidResourceRequestException e) {
+      fail(String.format(
+          "Resource request should be accepted. Requested: %s, available: %s",
+          requestedResource, availableResource));
+    }
+  }
+
+  @Test
+  public void testCustomResourceRequestedUnitIsSameAsAvailableUnit2()
+      throws InvalidResourceRequestException {
+    Resource requestedResource = ResourceTypesTestHelper.newResource(1, 1,
+        ImmutableMap.of("custom-resource-1", "110M"));
+
+    Resource availableResource = ResourceTypesTestHelper.newResource(1, 1,
+        ImmutableMap.of("custom-resource-1", "100M"));
+
+    exception.expect(InvalidResourceRequestException.class);
+    exception.expectMessage(InvalidResourceRequestExceptionMessageGenerator
+        .create().withRequestedResourceType("custom-resource-1")
+        .withRequestedResource(requestedResource)
+        .withAvailableAllocation(availableResource)
+        .withMaxAllocation(configuredMaxAllocation).build());
+
+    SchedulerUtils.checkResourceRequestAgainstAvailableResource(
+        requestedResource, availableResource);
+  }
+
   public static void waitSchedulerApplicationAttemptStopped(
       AbstractYarnScheduler ys,
       ApplicationAttemptId attemptId) throws InterruptedException {
@@ -801,8 +988,7 @@ public class TestSchedulerUtils {
   public static SchedulerApplication<SchedulerApplicationAttempt>
       verifyAppAddedAndRemovedFromScheduler(
           Map<ApplicationId, SchedulerApplication<SchedulerApplicationAttempt>> applications,
-          EventHandler<SchedulerEvent> handler, String queueName)
-          throws Exception {
+          EventHandler<SchedulerEvent> handler, String queueName) {
 
     ApplicationId appId =
         ApplicationId.newInstance(System.currentTimeMillis(), 1);
@@ -832,4 +1018,60 @@ public class TestSchedulerUtils {
     when(rmContext.getNodeLabelManager()).thenReturn(nlm);
     return rmContext;
   }
+
+  private static class InvalidResourceRequestExceptionMessageGenerator {
+
+    private StringBuilder sb;
+    private Resource requestedResource;
+    private Resource availableAllocation;
+    private Resource configuredMaxAllowedAllocation;
+    private String resourceType;
+
+    InvalidResourceRequestExceptionMessageGenerator(StringBuilder sb) {
+      this.sb = sb;
+    }
+
+    public static InvalidResourceRequestExceptionMessageGenerator create() {
+      return new InvalidResourceRequestExceptionMessageGenerator(
+          new StringBuilder());
+    }
+
+    InvalidResourceRequestExceptionMessageGenerator withRequestedResource(
+            Resource r) {
+      this.requestedResource = r;
+      return this;
+    }
+
+    InvalidResourceRequestExceptionMessageGenerator withRequestedResourceType(
+            String rt) {
+      this.resourceType = rt;
+      return this;
+    }
+
+    InvalidResourceRequestExceptionMessageGenerator withAvailableAllocation(
+            Resource r) {
+      this.availableAllocation = r;
+      return this;
+    }
+
+    InvalidResourceRequestExceptionMessageGenerator withMaxAllocation(
+        Resource r) {
+      this.configuredMaxAllowedAllocation = r;
+      return this;
+    }
+
+    public String build() {
+      return sb
+          .append("Invalid resource request, requested resource type=[")
+          .append(resourceType).append("]")
+          .append(" < 0 or greater than maximum allowed allocation. ")
+          .append("Requested resource=").append(requestedResource).append(", ")
+          .append("maximum allowed allocation=").append(availableAllocation)
+          .append(", please note that maximum allowed allocation is calculated "
+              + "by scheduler based on maximum resource of " +
+                  "registered NodeManagers, which might be less than " +
+                  "configured maximum allocation=")
+          .append(configuredMaxAllowedAllocation).toString();
+    }
+  }
 }
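
As an aside for readers tracing the unit-handling tests above: the checks hinge on normalizing values carrying different SI prefixes (base, k, M, G, all powers of 1000 for the non-binary units) before comparing them. Below is a minimal standalone sketch of that comparison; it deliberately does not use YARN's own org.apache.hadoop.yarn.util.UnitsConversionUtil, which is where the real conversion logic lives, and the class name and unit table here are illustrative only.

import java.util.LinkedHashMap;
import java.util.Map;

public class UnitCompareSketch {
  // SI prefixes as powers of 1000, matching the k/M/G units in the tests.
  private static final Map<String, Long> UNITS = new LinkedHashMap<>();
  static {
    UNITS.put("", 1L);               // base unit, e.g. "11"
    UNITS.put("k", 1_000L);          // e.g. "120k"
    UNITS.put("M", 1_000_000L);      // e.g. "11M"
    UNITS.put("G", 1_000_000_000L);  // e.g. "1G"
  }

  /** Returns <0, 0 or >0, comparator-style, after normalizing units. */
  static int compare(long valueA, String unitA, long valueB, String unitB) {
    // Normalize both sides to base units; a production implementation
    // must also guard against overflow, as YARN's utility does.
    long a = Math.multiplyExact(valueA, UNITS.get(unitA));
    long b = Math.multiplyExact(valueB, UNITS.get(unitB));
    return Long.compare(a, b);
  }

  public static void main(String[] args) {
    // "11" (base units) requested vs "1G" available: fits, no exception.
    System.out.println(compare(11, "", 1, "G") <= 0);     // true
    // "1M" requested vs "120k" available: exceeds, request is rejected.
    System.out.println(compare(1, "M", 120, "k") <= 0);   // false
    // "110M" vs "100M": same unit, plain comparison, rejected.
    System.out.println(compare(110, "M", 100, "M") <= 0); // false
  }
}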




[28/50] [abbrv] hadoop git commit: YARN-8274. Fixed a bug on docker start command. Contributed by Jason Lowe

Posted by xy...@apache.org.
YARN-8274. Fixed a bug on docker start command.
           Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a67f2c53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a67f2c53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a67f2c53

Branch: refs/heads/HDDS-4
Commit: a67f2c538f50a4cb5831a8cc14d3a8e85155de61
Parents: d0db646
Author: Eric Yang <ey...@apache.org>
Authored: Fri May 11 14:23:16 2018 -0400
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../container-executor/impl/utils/docker-util.c       | 14 ++++++++++++--
 .../container-executor/test/utils/test_docker_util.cc |  4 ++--
 2 files changed, 14 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a67f2c53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
index 8cd59f7..5be02a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -830,6 +830,7 @@ free_and_exit:
 
 int get_docker_start_command(const char *command_file, const struct configuration *conf, args *args) {
   int ret = 0;
+  char *docker = NULL;
   char *container_name = NULL;
   struct configuration command_config = {0, NULL};
   ret = read_and_verify_command_file(command_file, DOCKER_START_COMMAND, &command_config);
@@ -842,9 +843,18 @@ int get_docker_start_command(const char *command_file, const struct configuration *conf, args *args) {
     return INVALID_DOCKER_CONTAINER_NAME;
   }
 
+  docker = get_docker_binary(conf);
+  ret = add_to_args(args, docker);
+  free(docker);
+  if (ret != 0) {
+    ret = BUFFER_TOO_SMALL;
+    goto free_and_exit;
+  }
+
   ret = add_docker_config_param(&command_config, args);
   if (ret != 0) {
-    return BUFFER_TOO_SMALL;
+    ret = BUFFER_TOO_SMALL;
+    goto free_and_exit;
   }
 
   ret = add_to_args(args, DOCKER_START_COMMAND);
@@ -933,7 +943,7 @@ static int set_pid_namespace(const struct configuration *command_config,
       if (pid_host_enabled != NULL) {
         if (strcmp(pid_host_enabled, "1") == 0 ||
             strcasecmp(pid_host_enabled, "True") == 0) {
-          ret = add_to_args(args, "--pid='host'");
+          ret = add_to_args(args, "--pid=host");
           if (ret != 0) {
             ret = BUFFER_TOO_SMALL;
           }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a67f2c53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
index 1096935..3746fa1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
@@ -365,7 +365,7 @@ namespace ContainerExecutor {
     std::vector<std::pair<std::string, std::string> > file_cmd_vec;
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
          "[docker-command-execution]\n  docker-command=start\n  name=container_e1_12312_11111_02_000001",
-         "start container_e1_12312_11111_02_000001"));
+         "/usr/bin/docker start container_e1_12312_11111_02_000001"));
 
     std::vector<std::pair<std::string, int> > bad_file_cmd_vec;
     bad_file_cmd_vec.push_back(std::make_pair<std::string, int>(
@@ -514,7 +514,7 @@ namespace ContainerExecutor {
     std::vector<std::pair<std::string, std::string> >::const_iterator itr;
     std::vector<std::pair<std::string, int> >::const_iterator itr2;
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n  pid=host", "--pid='host'"));
+        "[docker-command-execution]\n  docker-command=run\n  pid=host", "--pid=host"));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
         "[docker-command-execution]\n  docker-command=run", ""));
     bad_file_cmd_vec.push_back(std::make_pair<std::string, int>(
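
To make the fix concrete (a sketch assembled from the test vectors above, with /usr/bin/docker standing in for whatever docker.binary is configured to), a start command file such as

[docker-command-execution]
  docker-command=start
  name=container_e1_12312_11111_02_000001

previously yielded the bare argv "start container_e1_12312_11111_02_000001", which is not an executable command; with this change get_docker_start_command() prepends the docker binary, producing "/usr/bin/docker start container_e1_12312_11111_02_000001". Likewise pid=host now maps to --pid=host rather than --pid='host': the argument vector is handed straight to exec with no shell in between, so the quotes had been reaching docker literally.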




[42/50] [abbrv] hadoop git commit: YARN-3610. FairScheduler: Add steady-fair-shares to the REST API documentation. (Ray Chiang via Haibo Chen)

Posted by xy...@apache.org.
YARN-3610. FairScheduler: Add steady-fair-shares to the REST API documentation. (Ray Chiang via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce914a45
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce914a45
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce914a45

Branch: refs/heads/HDDS-4
Commit: ce914a4539cd3685c5fc41061124f0b43ecca518
Parents: c74d172
Author: Haibo Chen <ha...@apache.org>
Authored: Fri May 11 14:07:09 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 10:31:09 2018 -0700

----------------------------------------------------------------------
 .../src/site/markdown/ResourceManagerRest.md    | 118 +++++++++++++++++--
 1 file changed, 110 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce914a45/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
index caeaf3e..a30677c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
@@ -1130,7 +1130,7 @@ Response Body:
 | type | string | Scheduler type - fairScheduler |
 | rootQueue | The root queue object | A collection of root queue resources |
 
-### Elements of the root queue object
+### Elements of all queue objects
 
 | Item | Data Type | Description |
 |:---- |:---- |:---- |
@@ -1142,17 +1142,23 @@ Response Body:
 | clusterResources | A single resource object | The capacity of the cluster |
 | queueName | string | The name of the queue |
 | schedulingPolicy | string | The name of the scheduling policy used by the queue |
-| childQueues | array of queues(JSON)/queue objects(XML) | A collection of sub-queue information. Omitted if the queue has no childQueues. |
+| childQueues | array of queues(JSON)/queue objects(XML) | A collection of sub-queue information. Omitted if the queue has no childQueues or is a leaf queue. |
+| allocatedContainers | int | The number of allocated containers |
+| demandResources | A single resource object | The resources that have been requested by containers in this queue which have not been fulfilled by the scheduler |
+| pendingContainers | int | The number of pending containers |
+| preemptable | boolean | true if containers in this queue can be preempted |
+| reservedContainers | int | The number of reserved containers |
+| steadyFairResources | A single resource object | The steady fair share for the queue |
 
-### Elements of the queues object for a Leaf queue - contains all the elements in parent except 'childQueues' plus the following
+### Additional elements of leaf queue objects (with the exception of the 'childQueues' property)
 
 | Item | Data Type | Description |
 |:---- |:---- |:---- |
-| type | string | type of the queue - fairSchedulerLeafQueueInfo |
+| type | string | The type of the queue - fairSchedulerLeafQueueInfo |
 | numActiveApps | int | The number of active applications in this queue |
 | numPendingApps | int | The number of pending applications in this queue |
 
-### Elements of the resource object for resourcesUsed in queues
+### Elements of the (cluster/demand/fair/max/min/used/*)Resources object in queues
 
 | Item | Data Type | Description |
 |:---- |:---- |:---- |
@@ -1181,13 +1187,19 @@ Response Body:
     "scheduler": {
         "schedulerInfo": {
             "rootQueue": {
+                "allocatedContainers": 0,
                 "childQueues": {
                     "queue": [
                         {
+                            "allocatedContainers": 0,
                             "clusterResources": {
                                 "memory": 8192,
                                 "vCores": 8
                             },
+                            "demandResources": {
+                                "memory": 0,
+                                "vCores": 0
+                            },
                             "fairResources": {
                                 "memory": 0,
                                 "vCores": 0
@@ -1203,8 +1215,15 @@ Response Body:
                             },
                             "numActiveApps": 0,
                             "numPendingApps": 0,
+                            "pendingContainers": 0,
+                            "preemptable": true,
                             "queueName": "root.default",
+                            "reservedContainers": 0,
                             "schedulingPolicy": "fair",
+                            "steadyFairResources": {
+                                "memory": 4096,
+                                "vCores": 0
+                            },
                             "type": "fairSchedulerLeafQueueInfo",
                             "usedResources": {
                                 "memory": 0,
@@ -1212,12 +1231,18 @@ Response Body:
                             }
                         },
                         {
+                            "allocatedContainers": 0,
                             "childQueues": {
                                 "queue": [
                                     {
+                                        "allocatedContainers": 0,
                                         "clusterResources": {
                                             "memory": 8192,
-                                           "vCores": 8
+                                            "vCores": 8
+                                        },
+                                        "demandResources": {
+                                            "memory": 0,
+                                            "vCores": 0
                                         },
                                         "fairResources": {
                                             "memory": 10000,
@@ -1234,8 +1259,15 @@ Response Body:
                                         },
                                         "numActiveApps": 0,
                                         "numPendingApps": 0,
+                                        "pendingContainers": 0,
+                                        "preemptable": true,
                                         "queueName": "root.sample_queue.sample_sub_queue",
+                                        "reservedContainers": 0,
                                         "schedulingPolicy": "fair",
+                                        "steadyFairResources": {
+                                            "memory": 4096,
+                                            "vCores": 0
+                                        },
                                         "type": "fairSchedulerLeafQueueInfo",
                                         "usedResources": {
                                             "memory": 0,
@@ -1248,6 +1280,10 @@ Response Body:
                                 "memory": 8192,
                                 "vCores": 8
                             },
+                            "demandResources": {
+                                "memory": 0,
+                                "vCores": 0
+                            },
                             "fairResources": {
                                 "memory": 10000,
                                 "vCores": 0
@@ -1261,19 +1297,30 @@ Response Body:
                                 "memory": 10000,
                                 "vCores": 0
                             },
+                            "pendingContainers": 0,
+                            "preemptable": true,
                             "queueName": "root.sample_queue",
+                            "reservedContainers": 0,
                             "schedulingPolicy": "fair",
+                            "steadyFairResources": {
+                                "memory": 4096,
+                                "vCores": 0
+                            },
                             "usedResources": {
                                 "memory": 0,
                                 "vCores": 0
                             }
                         }
-                    ],
+                    ]
                 },
                 "clusterResources": {
                     "memory": 8192,
                     "vCores": 8
                 },
+                "demandResources": {
+                    "memory": 0,
+                    "vCores": 0
+                },
                 "fairResources": {
                     "memory": 8192,
                     "vCores": 8
@@ -1287,8 +1334,15 @@ Response Body:
                     "memory": 0,
                     "vCores": 0
                 },
+                "pendingContainers": 0,
+                "preemptable": true,
                 "queueName": "root",
+                "reservedContainers": 0,
                 "schedulingPolicy": "fair",
+                "steadyFairResources": {
+                    "memory": 8192,
+                    "vCores": 8
+                },
                 "usedResources": {
                     "memory": 0,
                     "vCores": 0
@@ -1334,6 +1388,14 @@ Response Body:
         <memory>0</memory>
         <vCores>0</vCores>
       </usedResources>
+      <demandResources>
+        <memory>0</memory>
+        <vCores>0</vCores>
+      </demandResources>
+      <steadyFairResources>
+        <memory>8192</memory>
+        <vCores>8</vCores>
+      </steadyFairResources>
       <fairResources>
         <memory>8192</memory>
         <vCores>8</vCores>
@@ -1342,8 +1404,12 @@ Response Body:
         <memory>8192</memory>
         <vCores>8</vCores>
       </clusterResources>
+      <pendingContainers>0</pendingContainers>
+      <allocatedContainers>0</allocatedContainers>
+      <reservedContainers>0</reservedContainers>
       <queueName>root</queueName>
       <schedulingPolicy>fair</schedulingPolicy>
+      <preemptable>true</preemptable>
       <childQueues>
         <queue xsi:type="fairSchedulerLeafQueueInfo">
           <maxApps>2147483647</maxApps>
@@ -1359,6 +1425,14 @@ Response Body:
             <memory>0</memory>
             <vCores>0</vCores>
           </usedResources>
+          <demandResources>
+            <memory>0</memory>
+            <vCores>0</vCores>
+          </demandResources>
+          <steadyFairResources>
+            <memory>4096</memory>
+            <vCores>0</vCores>
+          </steadyFairResources>
           <fairResources>
             <memory>0</memory>
             <vCores>0</vCores>
@@ -1367,15 +1441,19 @@ Response Body:
             <memory>8192</memory>
             <vCores>8</vCores>
           </clusterResources>
+          <pendingContainers>0</pendingContainers>
+          <allocatedContainers>0</allocatedContainers>
+          <reservedContainers>0</reservedContainers>
           <queueName>root.default</queueName>
           <schedulingPolicy>fair</schedulingPolicy>
+          <preemptable>true</preemptable>
           <numPendingApps>0</numPendingApps>
           <numActiveApps>0</numActiveApps>
         </queue>
         <queue>
           <maxApps>50</maxApps>
           <minResources>
-            <memory>10000</memory>
+            <memory>0</memory>
             <vCores>0</vCores>
           </minResources>
           <maxResources>
@@ -1386,6 +1464,14 @@ Response Body:
             <memory>0</memory>
             <vCores>0</vCores>
           </usedResources>
+          <demandResources>
+            <memory>0</memory>
+            <vCores>0</vCores>
+          </demandResources>
+          <steadyFairResources>
+            <memory>4096</memory>
+            <vCores>0</vCores>
+          </steadyFairResources>
           <fairResources>
             <memory>10000</memory>
             <vCores>0</vCores>
@@ -1394,8 +1480,12 @@ Response Body:
             <memory>8192</memory>
             <vCores>8</vCores>
           </clusterResources>
+          <pendingContainers>0</pendingContainers>
+          <allocatedContainers>0</allocatedContainers>
+          <reservedContainers>0</reservedContainers>
           <queueName>root.sample_queue</queueName>
           <schedulingPolicy>fair</schedulingPolicy>
+          <preemptable>true</preemptable>
           <childQueues>
             <queue xsi:type="fairSchedulerLeafQueueInfo">
               <maxApps>2147483647</maxApps>
@@ -1411,6 +1501,14 @@ Response Body:
                 <memory>0</memory>
                 <vCores>0</vCores>
               </usedResources>
+              <demandResources>
+                <memory>0</memory>
+                <vCores>0</vCores>
+              </demandResources>
+              <steadyFairResources>
+                <memory>4096</memory>
+                <vCores>0</vCores>
+              </steadyFairResources>
               <fairResources>
                 <memory>10000</memory>
                 <vCores>0</vCores>
@@ -1419,8 +1517,12 @@ Response Body:
                 <memory>8192</memory>
                 <vCores>8</vCores>
               </clusterResources>
+              <pendingContainers>0</pendingContainers>
+              <allocatedContainers>0</allocatedContainers>
+              <reservedContainers>0</reservedContainers>
               <queueName>root.sample_queue.sample_sub_queue</queueName>
               <schedulingPolicy>fair</schedulingPolicy>
+              <preemptable>true</preemptable>
               <numPendingApps>0</numPendingApps>
               <numActiveApps>0</numActiveApps>
             </queue>
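
As a usage note (illustrative, not part of this commit): the fields documented above are served by the ResourceManager's scheduler REST endpoint, GET /ws/v1/cluster/scheduler. A minimal Java sketch to fetch and print the payload follows; the localhost:8088 address is an assumption matching the default RM webapp port.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class FetchSchedulerInfo {
  public static void main(String[] args) throws Exception {
    // Default RM webapp address; adjust host/port for a real cluster.
    URL url = new URL("http://localhost:8088/ws/v1/cluster/scheduler");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json"); // or application/xml
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      // steadyFairResources, demandResources, preemptable and the container
      // counters appear per queue in this payload, as documented above.
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    } finally {
      conn.disconnect();
    }
  }
}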

