Posted to common-commits@hadoop.apache.org by in...@apache.org on 2018/07/25 01:31:44 UTC

[01/50] hadoop git commit: HDFS-13733. RBF: Add Web UI configurations and descriptions to RBF document. Contributed by Takanobu Asanuma. [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-15461 ad353e3a1 -> bac459b3f (forced update)


HDFS-13733. RBF: Add Web UI configurations and descriptions to RBF document. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1af87df2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1af87df2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1af87df2

Branch: refs/heads/HADOOP-15461
Commit: 1af87df242c4286474961078d306a5692f85debc
Parents: 0a1e922
Author: Yiqun Lin <yq...@apache.org>
Authored: Tue Jul 17 10:45:08 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Tue Jul 17 10:45:08 2018 +0800

----------------------------------------------------------------------
 .../src/site/markdown/HDFSRouterFederation.md           | 12 ++++++++++++
 1 file changed, 12 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1af87df2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
index 73e0f4a..c5bf5e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
@@ -330,6 +330,18 @@ The administration server to manage the Mount Table.
 | dfs.federation.router.admin-bind-host | 0.0.0.0 | The actual address the RPC admin server will bind to. |
 | dfs.federation.router.admin.handler.count | 1 | The number of server threads for the router to handle RPC requests from admin. |
 
+### HTTP Server
+
+The HTTP Server to provide Web UI and the HDFS REST interface ([WebHDFS](../hadoop-hdfs/WebHDFS.html)) for the clients. The default URL is "`http://router_host:50071`".
+
+| Property | Default | Description|
+|:---- |:---- |:---- |
+| dfs.federation.router.http.enable | `true` | If `true`, the HTTP service to handle client requests in the router is enabled. |
+| dfs.federation.router.http-address | 0.0.0.0:50071 | HTTP address that handles the web requests to the Router. |
+| dfs.federation.router.http-bind-host | 0.0.0.0 | The actual address the HTTP server will bind to. |
+| dfs.federation.router.https-address | 0.0.0.0:50072 | HTTPS address that handles the web requests to the Router. |
+| dfs.federation.router.https-bind-host | 0.0.0.0 | The actual address the HTTPS server will bind to. |
+
 ### State Store
 
 The connection to the State Store and the internal caching at the Router.
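
A minimal client-side sketch for the Router HTTP endpoint documented in the table above, assuming a Router reachable at the hypothetical host "router.example.com" on the default HTTP port 50071; the file path is a placeholder too. It opens a file through the Router's WebHDFS interface.

import java.io.InputStream;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RouterWebHdfsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The Router HTTP server (dfs.federation.router.http-address) listens on
    // port 50071 by default, so WebHDFS clients point at that address.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://router.example.com:50071"), conf);
    try (InputStream in = fs.open(new Path("/tmp/example.txt"))) {
      System.out.println("first byte: " + in.read());
    }
  }
}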


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[50/50] hadoop git commit: HADOOP-15536. Adding support in FileUtil for the creation of directories. Contributed by Giovanni Matteo Fumarola.

Posted by in...@apache.org.
HADOOP-15536. Adding support in FileUtil for the creation of directories. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bac459b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bac459b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bac459b3

Branch: refs/heads/HADOOP-15461
Commit: bac459b3fffb62e04ecf39ebf0c3ff54b2dc49c0
Parents: 866646e
Author: Inigo Goiri <in...@apache.org>
Authored: Fri Jun 29 13:38:24 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue Jul 24 18:30:49 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/FileUtil.java     |  50 +++++
 .../apache/hadoop/fs/TestFileUtilsMkDir.java    | 205 +++++++++++++++++++
 2 files changed, 255 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bac459b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index f3b5d58..bf3feb5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -57,6 +57,7 @@ import java.util.zip.ZipInputStream;
 import org.apache.commons.collections.map.CaseInsensitiveMap;
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.io.FileExistsException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -1638,4 +1639,53 @@ public class FileUtil {
     // check for ports
     return srcUri.getPort()==dstUri.getPort();
   }
+
+  /**
+   * Creates the directory named by the destination pathname, including any
+   * necessary but nonexistent parent directories. Note that if this operation
+   * fails it may have succeeded in creating some of the necessary parent
+   * directories.
+   *
+   * @param dst the directory to be created.
+   * @return 0 on success or if the directory was already present, 1 otherwise.
+   * @throws FileAlreadyExistsException if dst is an existing file
+   */
+  public static int mkDirs(String dst) throws FileAlreadyExistsException {
+    // Null pointer input check
+    if (dst == null) {
+      LOG.warn("Can not create a directory with null path");
+      return 1;
+    }
+    File directory = new File(dst);
+
+    // Create the directory(ies)
+    boolean result = false;
+    try {
+      result = directory.mkdirs();
+    } catch (SecurityException e) {
+      LOG.warn("Unable to create the directory {}. Exception = {}", dst,
+          e.getMessage());
+      return 1;
+    }
+
+    // Check if mkdirs successfully created the directory(ies)
+    if (result) {
+      LOG.debug("Directory created successfully: {}", dst);
+      return 0;
+    } else {
+      // File already present check
+      if (directory.exists()) {
+        if (directory.isFile()) {
+          throw new FileAlreadyExistsException(
+              "Can not create a directory since a file is already present"
+                  + " at the destination " + dst);
+        }
+        LOG.debug("Directory already present {}", dst);
+        return 0;
+      }
+      LOG.warn("Unable to create the directory {}", dst);
+      return 1;
+    }
+  }
+
 }
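
A brief usage sketch of the new FileUtil.mkDirs(String); the path below is a placeholder. The method returns 0 when the directory exists afterwards (whether it was created or already present), returns 1 on failure, and throws FileAlreadyExistsException when a regular file already occupies the path.

import org.apache.hadoop.fs.FileUtil;

public class MkDirsExample {
  public static void main(String[] args) throws Exception {
    // Creates the directory and any missing parents; 0 means it is now in place.
    int rc = FileUtil.mkDirs("/tmp/example/parent/child");
    System.out.println(rc == 0 ? "directory is in place" : "creation failed");
  }
}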

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bac459b3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtilsMkDir.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtilsMkDir.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtilsMkDir.java
new file mode 100644
index 0000000..0e6bfdb
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtilsMkDir.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.file.FileAlreadyExistsException;
+
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestFileUtilsMkDir {
+
+  private static final File TEST_DIR = GenericTestUtils.getTestDir("nm");
+  private final File del = new File(TEST_DIR, "del");
+
+  @Before
+  public void before() {
+    cleanupImpl();
+  }
+
+  @After
+  public void tearDown() {
+    cleanupImpl();
+  }
+
+  private void cleanupImpl() {
+    FileUtil.fullyDelete(del, true);
+    Assert.assertTrue(!del.exists());
+  }
+
+  /**
+   * This test validates the correctness of {@link FileUtil#mkDirs(String)} in
+   * case of null pointer inputs.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testMkDirsWithNullInput() throws IOException {
+    int result = FileUtil.mkDirs(null);
+    Assert.assertEquals(1, result);
+  }
+
+  /**
+   * This test validates the correctness of {@link FileUtil#mkDirs(String)}.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testMkDirs() throws IOException {
+    Assert.assertFalse(del.exists());
+    del.mkdirs();
+
+    File directory = new File(del, "newDirectory");
+    Assert.assertFalse(directory.exists());
+
+    // Create the directory
+    int result = FileUtil.mkDirs(directory.getAbsolutePath());
+    Assert.assertEquals(0, result);
+
+    Assert.assertTrue(directory.exists());
+    Assert.assertTrue(directory.isDirectory());
+
+    directory.delete();
+  }
+
+  /**
+   * This test validates the correctness of {@link FileUtil#mkDirs(String)} in
+   * case of multiple parents.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testMkDirsMultipleParents() throws IOException {
+    Assert.assertFalse(del.exists());
+    del.mkdirs();
+
+    File directory = new File(del, "newParent1/newParent2/newDirectory");
+    Assert.assertFalse(directory.exists());
+
+    // Create the directory
+    int result = FileUtil.mkDirs(directory.getAbsolutePath());
+    Assert.assertEquals(0, result);
+
+    Assert.assertTrue(directory.exists());
+    Assert.assertTrue(directory.isDirectory());
+
+    Assert.assertTrue(directory.getParentFile().exists());
+    Assert.assertTrue(directory.getParentFile().isDirectory());
+
+    Assert.assertTrue(directory.getParentFile().getParentFile().exists());
+    Assert.assertTrue(directory.getParentFile().getParentFile().isDirectory());
+
+    directory.getParentFile().getParentFile().delete();
+    directory.getParentFile().delete();
+    directory.delete();
+  }
+
+  /**
+   * This test validates the correctness of {@link FileUtil#mkDirs(String)} in
+   * case of repeated executions of mkDirs on the same path.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testMkDirsMultipleTimes() throws IOException {
+    Assert.assertFalse(del.exists());
+    del.mkdirs();
+
+    File directory = new File(del, "newDirectory");
+    Assert.assertFalse(directory.exists());
+
+    // Create the directory
+    int result = FileUtil.mkDirs(directory.getAbsolutePath());
+    Assert.assertEquals(0, result);
+
+    Assert.assertTrue(directory.exists());
+    Assert.assertTrue(directory.isDirectory());
+
+    result = FileUtil.mkDirs(directory.getAbsolutePath());
+    Assert.assertEquals(0, result);
+
+    directory.delete();
+  }
+
+  /**
+   * This test validates the correctness of {@link FileUtil#mkDirs(String)} in
+   * case of a creation over a file.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testMkDirsOverAFile() throws IOException {
+    Assert.assertFalse(del.exists());
+    del.mkdirs();
+
+    File falseDirectory = new File(del, "newDirectory");
+    Assert.assertFalse(falseDirectory.exists());
+
+    byte[] data = "some data".getBytes();
+
+    // write some data to the file
+    FileOutputStream os = new FileOutputStream(falseDirectory);
+    os.write(data);
+    os.close();
+
+    // Create the directory
+    try {
+      FileUtil.mkDirs(falseDirectory.getAbsolutePath());
+      Assert.fail("The test should fail with FileAlreadyExistsException");
+    } catch (FileAlreadyExistsException e) {
+      Assert.assertTrue(e.getMessage().startsWith(
+          "Can not create a directory since a file is already present"));
+    }
+  }
+
+  /**
+   * This test validates the correctness of {@link FileUtil#mkDirs(String)} in
+   * case of a creation underneath a file.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testMkDirsUnderNeathAFile() throws IOException {
+    Assert.assertFalse(del.exists());
+    del.mkdirs();
+
+    File newFile = new File(del, "newFile");
+    Assert.assertFalse(newFile.exists());
+
+    byte[] data = "some data".getBytes();
+
+    // write some data to the file
+    FileOutputStream os = new FileOutputStream(newFile);
+    os.write(data);
+    os.close();
+
+    File falseDirectory = new File(del, "newFile/newDirectory");
+
+    // Create the directory
+    int result = FileUtil.mkDirs(falseDirectory.getAbsolutePath());
+    Assert.assertEquals(1, result);
+
+    Assert.assertFalse(falseDirectory.exists());
+  }
+}




[29/50] hadoop git commit: HDDS-249. Fail if multiple SCM IDs on the DataNode and add SCM ID check after version request. Contributed by Bharat Viswanadham.

Posted by in...@apache.org.
HDDS-249. Fail if multiple SCM IDs on the DataNode and add SCM ID check after version request. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fa9e301
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fa9e301
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fa9e301

Branch: refs/heads/HADOOP-15461
Commit: 9fa9e301b0471f38530a3cb596b00064436d311d
Parents: 993ec02
Author: Nanda kumar <na...@apache.org>
Authored: Sat Jul 21 18:46:31 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Sat Jul 21 18:46:31 2018 +0530

----------------------------------------------------------------------
 .../states/endpoint/VersionEndpointTask.java    | 27 ++++++++--
 .../container/common/utils/HddsVolumeUtil.java  | 56 ++++++++++++++++++++
 .../container/ozoneimpl/ContainerReader.java    | 22 +++++---
 .../container/ozoneimpl/OzoneContainer.java     |  2 +-
 .../ozone/container/common/ScmTestMock.java     | 27 +++++++++-
 .../common/TestDatanodeStateMachine.java        | 14 ++---
 .../ozone/container/common/TestEndPoint.java    | 49 +++++++++++++++++
 7 files changed, 177 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fa9e301/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index d782b59..64e078d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -23,10 +23,14 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.statemachine
     .EndpointStateMachine;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.VersionResponse;
+import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.Map;
@@ -37,6 +41,8 @@ import java.util.concurrent.Callable;
  */
 public class VersionEndpointTask implements
     Callable<EndpointStateMachine.EndPointStates> {
+  public static final Logger LOG = LoggerFactory.getLogger(VersionEndpointTask
+      .class);
   private final EndpointStateMachine rpcEndPoint;
   private final Configuration configuration;
   private final OzoneContainer ozoneContainer;
@@ -71,21 +77,32 @@ public class VersionEndpointTask implements
 
       Preconditions.checkNotNull(scmId, "Reply from SCM: scmId cannot be " +
           "null");
-      Preconditions.checkNotNull(scmId, "Reply from SCM: clusterId cannot be" +
-          " null");
+      Preconditions.checkNotNull(clusterId, "Reply from SCM: clusterId " +
+          "cannot be null");
 
       // If version file does not exist create version file and also set scmId
       for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
         HddsVolume hddsVolume = entry.getValue();
-        hddsVolume.format(clusterId);
-        ozoneContainer.getDispatcher().setScmId(scmId);
+        boolean result = HddsVolumeUtil.checkVolume(hddsVolume, scmId,
+            clusterId, LOG);
+        if (!result) {
+          volumeSet.failVolume(hddsVolume.getHddsRootDir().getPath());
+        }
       }
+      if (volumeSet.getVolumesList().size() == 0) {
+        // All volumes are in an inconsistent state
+        throw new DiskOutOfSpaceException("All configured Volumes are in " +
+            "Inconsistent State");
+      }
+      ozoneContainer.getDispatcher().setScmId(scmId);
 
       EndpointStateMachine.EndPointStates nextState =
           rpcEndPoint.getState().getNextState();
       rpcEndPoint.setState(nextState);
       rpcEndPoint.zeroMissedCount();
-    } catch (IOException ex) {
+    } catch (DiskOutOfSpaceException ex) {
+      rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN);
+    } catch(IOException ex) {
       rpcEndPoint.logIfNeeded(ex);
     } finally {
       rpcEndPoint.unlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fa9e301/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
index 5d6fc0a..bc0bd05 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
@@ -25,8 +25,10 @@ import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
 import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
 
 import java.io.File;
+import java.io.IOException;
 import java.util.Properties;
 import java.util.UUID;
 
@@ -160,4 +162,58 @@ public final class HddsVolumeUtil {
     }
     return value;
   }
+
+  /**
+   * Checks whether the volume is in a consistent state.
+   * @param hddsVolume
+   * @param scmId
+   * @param clusterId
+   * @param logger
+   * @return true - if volume is in consistent state, otherwise false.
+   */
+  public static boolean checkVolume(HddsVolume hddsVolume, String scmId, String
+      clusterId, Logger logger) {
+    File hddsRoot = hddsVolume.getHddsRootDir();
+    String volumeRoot = hddsRoot.getPath();
+    File scmDir = new File(hddsRoot, scmId);
+
+    try {
+      hddsVolume.format(clusterId);
+    } catch (IOException ex) {
+      logger.error("Error during formatting volume {}, exception is {}",
+          volumeRoot, ex);
+      return false;
+    }
+
+    File[] hddsFiles = hddsRoot.listFiles();
+
+    if(hddsFiles == null) {
+      // This is the case for IOException, where listFiles returns null.
+      // So, we fail the volume.
+      return false;
+    } else if (hddsFiles.length == 1) {
+      // DN started for the first time, or this is a newly added volume,
+      // so we create the scm directory.
+      if (!scmDir.mkdir()) {
+        logger.error("Unable to create scmDir {}", scmDir);
+        return false;
+      }
+      return true;
+    } else if(hddsFiles.length == 2) {
+      // The files should be Version and SCM directory
+      if (scmDir.exists()) {
+        return true;
+      } else {
+        logger.error("Volume {} is in Inconsistent state, expected scm " +
+                "directory {} does not exist", volumeRoot, scmDir
+            .getAbsolutePath());
+        return false;
+      }
+    } else {
+      // The hdds root dir should always have 2 files. One is Version file
+      // and other is SCM directory.
+      return false;
+    }
+
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fa9e301/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
index 986aa16..c1595b2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.common.Storage;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
@@ -64,14 +65,16 @@ public class ContainerReader implements Runnable {
   private final ContainerSet containerSet;
   private final OzoneConfiguration config;
   private final File hddsVolumeDir;
+  private final VolumeSet volumeSet;
 
-  ContainerReader(HddsVolume volume, ContainerSet cset, OzoneConfiguration
-      conf) {
+  ContainerReader(VolumeSet volSet, HddsVolume volume, ContainerSet cset,
+                  OzoneConfiguration conf) {
     Preconditions.checkNotNull(volume);
     this.hddsVolume = volume;
     this.hddsVolumeDir = hddsVolume.getHddsRootDir();
     this.containerSet = cset;
     this.config = conf;
+    this.volumeSet = volSet;
   }
 
   @Override
@@ -97,10 +100,18 @@ public class ContainerReader implements Runnable {
     });
 
     if (scmDir == null) {
-      LOG.error("Volume {} is empty with out metadata and chunks",
+      LOG.error("IO error for the volume {}, skipped loading",
           hddsVolumeRootDir);
+      volumeSet.failVolume(hddsVolumeRootDir.getPath());
       return;
     }
+
+    if (scmDir.length > 1) {
+      LOG.error("Volume {} is in Inconsistent state", hddsVolumeRootDir);
+      volumeSet.failVolume(hddsVolumeRootDir.getPath());
+      return;
+    }
+
     for (File scmLoc : scmDir) {
       File currentDir = null;
       currentDir = new File(scmLoc, Storage.STORAGE_DIR_CURRENT);
@@ -123,9 +134,8 @@ public class ContainerReader implements Runnable {
                     verifyContainerFile(containerName, containerFile,
                         checksumFile);
                   } else {
-                    LOG.error(
-                        "Missing container metadata files for Container: " +
-                            "{}", containerName);
+                    LOG.error("Missing container metadata files for " +
+                        "Container: {}", containerName);
                   }
                 } else {
                   LOG.error("Missing container metadata directory for " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fa9e301/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 8c3a0a2..8f067d9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -106,7 +106,7 @@ public class OzoneContainer {
     while (volumeSetIterator.hasNext()) {
       HddsVolume volume = volumeSetIterator.next();
       File hddsVolumeRootDir = volume.getHddsRootDir();
-      Thread thread = new Thread(new ContainerReader(volume,
+      Thread thread = new Thread(new ContainerReader(volumeSet, volume,
           containerSet, config));
       thread.start();
       volumeThreads.add(thread);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fa9e301/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index fb8e7c1..8827d1d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -56,6 +56,13 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
   private AtomicInteger heartbeatCount = new AtomicInteger(0);
   private AtomicInteger rpcCount = new AtomicInteger(0);
   private AtomicInteger containerReportsCount = new AtomicInteger(0);
+  private String clusterId;
+  private String scmId;
+
+  public ScmTestMock() {
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+  }
 
   // Map of datanode to containers
   private Map<DatanodeDetails, Map<String, ContainerInfo>> nodeContainers =
@@ -157,8 +164,8 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
     return VersionResponse.newBuilder()
         .setVersion(versionInfo.getVersion())
         .addValue(VersionInfo.DESCRIPTION_KEY, versionInfo.getDescription())
-        .addValue(OzoneConsts.SCM_ID, UUID.randomUUID().toString())
-        .addValue(OzoneConsts.CLUSTER_ID, UUID.randomUUID().toString())
+        .addValue(OzoneConsts.SCM_ID, scmId)
+        .addValue(OzoneConsts.CLUSTER_ID, clusterId)
         .build().getProtobufMessage();
 
   }
@@ -329,4 +336,20 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
   public void addScmCommandRequest(SCMCommandProto scmCmd) {
     scmCommandRequests.add(scmCmd);
   }
+
+  /**
+   * Set scmId.
+   * @param id
+   */
+  public void setScmId(String id) {
+    this.scmId = id;
+  }
+
+  /**
+   * Get scmId.
+   * @return scmId
+   */
+  public String getScmId() {
+    return scmId;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fa9e301/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index ece7545..59029db 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common;
 import com.google.common.collect.Maps;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ipc.RPC;
@@ -57,9 +58,9 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -68,7 +69,9 @@ import static org.junit.Assert.assertTrue;
 public class TestDatanodeStateMachine {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestDatanodeStateMachine.class);
-  private final int scmServerCount = 3;
+  // Changed to 1, as the current code checks for multiple SCM directories
+  // and fails if more than one exists.
+  private final int scmServerCount = 1;
   private List<String> serverAddresses;
   private List<RPC.Server> scmServers;
   private List<ScmTestMock> mockServers;
@@ -90,7 +93,6 @@ public class TestDatanodeStateMachine {
       String address = "127.0.0.1";
       serverAddresses.add(address + ":" + port);
       ScmTestMock mock = new ScmTestMock();
-
       scmServers.add(SCMTestUtils.startScmRpcServer(conf, mock,
           new InetSocketAddress(address, port), 10));
       mockServers.add(mock);
@@ -107,7 +109,7 @@ public class TestDatanodeStateMachine {
     }
 
     File dataDir = new File(testRoot, "data");
-    conf.set(DFS_DATANODE_DATA_DIR_KEY, dataDir.getAbsolutePath());
+    conf.set(HDDS_DATANODE_DIR_KEY, dataDir.getAbsolutePath());
     if (!dataDir.mkdirs()) {
       LOG.info("Data dir create failed.");
     }
@@ -145,7 +147,7 @@ public class TestDatanodeStateMachine {
     } catch (Exception e) {
       //ignore all execption from the shutdown
     } finally {
-      testRoot.delete();
+      FileUtil.fullyDelete(testRoot);
     }
   }
 
@@ -162,7 +164,7 @@ public class TestDatanodeStateMachine {
       stateMachine.startDaemon();
       SCMConnectionManager connectionManager =
           stateMachine.getConnectionManager();
-      GenericTestUtils.waitFor(() -> connectionManager.getValues().size() == 3,
+      GenericTestUtils.waitFor(() -> connectionManager.getValues().size() == 1,
           1000, 30000);
 
       stateMachine.stopDaemon();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fa9e301/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index be8bd87..6619d26 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -70,8 +70,10 @@ import org.apache.hadoop.ozone.container.common.states.endpoint
     .RegisterEndpointTask;
 import org.apache.hadoop.ozone.container.common.states.endpoint
     .VersionEndpointTask;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.AfterClass;
@@ -175,6 +177,53 @@ public class TestEndPoint {
   }
 
   @Test
+  public void testCheckVersionResponse() throws Exception {
+    OzoneConfiguration conf = SCMTestUtils.getConf();
+    try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
+        serverAddress, 1000)) {
+      GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
+          .captureLogs(VersionEndpointTask.LOG);
+      OzoneContainer ozoneContainer = new OzoneContainer(getDatanodeDetails(),
+          conf);
+      rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
+      VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
+          conf, ozoneContainer);
+      EndpointStateMachine.EndPointStates newState = versionTask.call();
+
+      // if version call worked the endpoint should automatically move to the
+      // next state.
+      Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER,
+          newState);
+
+      // Now rpcEndpoint should remember the version it got from SCM
+      Assert.assertNotNull(rpcEndPoint.getVersion());
+
+      // Now change server scmId, so datanode scmId  will be
+      // different from SCM server response scmId
+      String newScmId = UUID.randomUUID().toString();
+      scmServerImpl.setScmId(newScmId);
+      newState = versionTask.call();
+      Assert.assertEquals(EndpointStateMachine.EndPointStates.SHUTDOWN,
+            newState);
+      List<HddsVolume> volumesList = ozoneContainer.getVolumeSet()
+          .getFailedVolumesList();
+      Assert.assertTrue(volumesList.size() == 1);
+      File expectedScmDir = new File(volumesList.get(0).getHddsRootDir(),
+          scmServerImpl.getScmId());
+      Assert.assertTrue(logCapturer.getOutput().contains("expected scm " +
+          "directory " + expectedScmDir.getAbsolutePath() + " does not " +
+          "exist"));
+      Assert.assertTrue(ozoneContainer.getVolumeSet().getVolumesList().size()
+          == 0);
+      Assert.assertTrue(ozoneContainer.getVolumeSet().getFailedVolumesList()
+          .size() == 1);
+
+    }
+  }
+
+
+
+  @Test
   /**
    * This test makes a call to end point where there is no SCM server. We
    * expect that versionTask should be able to handle it.




[39/50] hadoop git commit: YARN-8544. [DS] AM registration fails when hadoop authorization is enabled. Contributed by Bibin A Chundatt.

Posted by in...@apache.org.
YARN-8544. [DS] AM registration fails when hadoop authorization is enabled. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84612788
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84612788
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84612788

Branch: refs/heads/HADOOP-15461
Commit: 84612788339392fcda1aef0e27c43f5c6b2a19e5
Parents: 16f9aee
Author: bibinchundatt <bi...@apache.org>
Authored: Tue Jul 24 13:09:17 2018 +0530
Committer: bibinchundatt <bi...@apache.org>
Committed: Tue Jul 24 13:09:17 2018 +0530

----------------------------------------------------------------------
 .../src/main/conf/hadoop-policy.xml             | 20 ++++++++++++++++
 .../dev-support/findbugs-exclude.xml            |  4 ++++
 .../hadoop/yarn/conf/YarnConfiguration.java     |  7 ++++++
 .../yarn/conf/TestYarnConfigurationFields.java  |  4 ++++
 .../nodemanager/amrmproxy/AMRMProxyService.java |  8 +++++++
 .../collectormanager/NMCollectorService.java    |  2 +-
 .../containermanager/ContainerManagerImpl.java  |  2 +-
 .../localizer/ResourceLocalizationService.java  |  2 +-
 .../security/authorize/NMPolicyProvider.java    | 25 ++++++++++++++++++--
 .../security/authorize/RMPolicyProvider.java    |  3 +++
 10 files changed, 72 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
index cf3dd1f..bd7c111 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
@@ -242,4 +242,24 @@
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
   </property>
+
+  <property>
+    <name>security.applicationmaster-nodemanager.applicationmaster.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ApplicationMasterProtocol, used by the Nodemanager
+        and ApplicationMasters to communicate.
+        The ACL is a comma-separated list of user and group names. The user and
+        group list is separated by a blank. For e.g. "alice,bob users,wheel".
+        A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.distributedscheduling.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DistributedSchedulingAMProtocol, used by the Nodemanager
+        and Resourcemanager to communicate.
+        The ACL is a comma-separated list of user and group names. The user and
+        group list is separated by a blank. For e.g. "alice,bob users,wheel".
+        A special value of "*" means all users are allowed.</description>
+    </property>
 </configuration>
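
The ACL string format described above (user list, a blank, then group list, with "*" meaning everyone) is the format parsed by Hadoop's AccessControlList; a small illustrative sketch, not part of this commit:

import org.apache.hadoop.security.authorize.AccessControlList;

public class AclFormatExample {
  public static void main(String[] args) {
    // Users come before the blank, groups after it.
    AccessControlList acl = new AccessControlList("alice,bob users,wheel");
    System.out.println("users:  " + acl.getUsers());
    System.out.println("groups: " + acl.getGroups());
    // A single "*" allows all callers.
    System.out.println("all allowed: " + new AccessControlList("*").isAllAllowed());
  }
}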

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 5cc81e5..216c3bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -468,6 +468,10 @@
       <Bug pattern="DC_DOUBLECHECK" />
   </Match>
 
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.nodemanager.security.authorize.NMPolicyProvider"/>
+    <Bug pattern="DC_DOUBLECHECK" />
+  </Match>
   <!-- ApplicationClassLoader is deprecated and moved to hadoop-common; ignore
        warning on the identical name as it should be removed later -->
   <Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 9156c2d..bbf877f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2248,6 +2248,9 @@ public class YarnConfiguration extends Configuration {
   public static final String 
   YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_PROTOCOL =
       "security.applicationmaster.protocol.acl";
+  public static final String
+      YARN_SECURITY_SERVICE_AUTHORIZATION_DISTRIBUTEDSCHEDULING_PROTOCOL =
+      "security.distributedscheduling.protocol.acl";
 
   public static final String 
   YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL =
@@ -2264,6 +2267,10 @@ public class YarnConfiguration extends Configuration {
       YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL =
       "security.collector-nodemanager.protocol.acl";
 
+  public static final String
+      YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_NODEMANAGER_PROTOCOL =
+      "security.applicationmaster-nodemanager.applicationmaster.protocol.acl";
+
   /** No. of milliseconds to wait between sending a SIGTERM and SIGKILL
    * to a running container */
   public static final String NM_SLEEP_DELAY_BEFORE_SIGKILL_MS =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index b9ba543..9249ed4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -68,6 +68,10 @@ public class TestYarnConfigurationFields extends TestConfigurationFieldsBase {
             .YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER_PROTOCOL);
     configurationPropsToSkipCompare.add(YarnConfiguration
         .YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL);
+    configurationPropsToSkipCompare.add(YarnConfiguration
+        .YARN_SECURITY_SERVICE_AUTHORIZATION_DISTRIBUTEDSCHEDULING_PROTOCOL);
+    configurationPropsToSkipCompare.add(YarnConfiguration
+        .YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_NODEMANAGER_PROTOCOL);
     configurationPropsToSkipCompare.add(YarnConfiguration.CURATOR_LEADER_ELECTOR);
     configurationPropsToSkipCompare
         .add(YarnConfiguration.RM_RESERVATION_SYSTEM_MAX_PERIODICITY);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index 86fbb72..02ff432 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -70,6 +70,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Ap
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredAMRMProxyState;
 import org.apache.hadoop.yarn.server.nodemanager.scheduler.DistributedScheduler;
+import org.apache.hadoop.yarn.server.nodemanager.security.authorize
+    .NMPolicyProvider;
 import org.apache.hadoop.yarn.server.security.MasterKeyData;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
@@ -169,6 +171,12 @@ public class AMRMProxyService extends CompositeService implements
             listenerEndpoint, serverConf, this.secretManager,
             numWorkerThreads);
 
+    if (conf
+        .getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
+            false)) {
+        this.server.refreshServiceAcl(conf, NMPolicyProvider.getInstance());
+    }
+
     this.server.start();
     LOG.info("AMRMProxyService listening on address: "
         + this.server.getListenerAddress());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
index 4648a65..f07ef85 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
@@ -87,7 +87,7 @@ public class NMCollectorService extends CompositeService implements
 
     if (conf.getBoolean(
         CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
-      server.refreshServiceAcl(conf, new NMPolicyProvider());
+      server.refreshServiceAcl(conf, NMPolicyProvider.getInstance());
     }
 
     server.start();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 89bef8f..ce240bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -610,7 +610,7 @@ public class ContainerManagerImpl extends CompositeService implements
     if (conf.getBoolean(
         CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
         false)) {
-      refreshServiceAcls(conf, new NMPolicyProvider());
+      refreshServiceAcls(conf, NMPolicyProvider.getInstance());
     }
     
     String bindHost = conf.get(YarnConfiguration.NM_BIND_HOST);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 3f0a6fb..4ca6720 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -406,7 +406,7 @@ public class ResourceLocalizationService extends CompositeService
     if (conf.getBoolean(
         CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
         false)) {
-      server.refreshServiceAcl(conf, new NMPolicyProvider());
+      server.refreshServiceAcl(conf, NMPolicyProvider.getInstance());
     }
     
     return server;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
index 7b28659..c8986f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.Service;
+import org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocolPB;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.api.CollectorNodemanagerProtocolPB;
@@ -32,7 +33,24 @@ import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class NMPolicyProvider extends PolicyProvider {
-  
+
+  private static NMPolicyProvider nmPolicyProvider = null;
+
+  private NMPolicyProvider() {}
+
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  public static NMPolicyProvider getInstance() {
+    if (nmPolicyProvider == null) {
+      synchronized(NMPolicyProvider.class) {
+        if (nmPolicyProvider == null) {
+          nmPolicyProvider = new NMPolicyProvider();
+        }
+      }
+    }
+    return nmPolicyProvider;
+  }
+
   private static final Service[] NODE_MANAGER_SERVICES =
       new Service[] {
           new Service(YarnConfiguration.
@@ -43,7 +61,10 @@ public class NMPolicyProvider extends PolicyProvider {
             LocalizationProtocolPB.class),
           new Service(YarnConfiguration.
             YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL,
-            CollectorNodemanagerProtocolPB.class)
+            CollectorNodemanagerProtocolPB.class),
+          new Service(YarnConfiguration.
+              YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_NODEMANAGER_PROTOCOL,
+              ApplicationMasterProtocolPB.class),
       };
 
   @Override
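
The getInstance() above uses double-checked locking on a non-volatile field, which is why the same commit also adds a findbugs DC_DOUBLECHECK exclusion for NMPolicyProvider. Purely for comparison, and not part of this commit, a lazy singleton can also be written with the initialization-on-demand holder idiom, which is thread-safe without explicit locking:

public class LazySingletonSketch {
  private LazySingletonSketch() {}

  // The nested class is not loaded until getInstance() is first called, and the
  // JVM guarantees its static initialization happens exactly once.
  private static final class Holder {
    private static final LazySingletonSketch INSTANCE = new LazySingletonSketch();
  }

  public static LazySingletonSketch getInstance() {
    return Holder.INSTANCE;
  }
}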

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java
index 8c5efa1..b56ca23 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocolPB;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocolPB;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.api.DistributedSchedulingAMProtocolPB;
 import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
 import org.apache.hadoop.yarn.server.api.ResourceTrackerPB;
 
@@ -67,6 +68,8 @@ public class RMPolicyProvider extends PolicyProvider {
     new Service(
         YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_PROTOCOL, 
         ApplicationMasterProtocolPB.class),
+    new Service(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_DISTRIBUTEDSCHEDULING_PROTOCOL,
+              DistributedSchedulingAMProtocolPB.class),
     new Service(
         YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCEMANAGER_ADMINISTRATION_PROTOCOL, 
         ResourceManagerAdministrationProtocolPB.class),




[12/50] hadoop git commit: YARN-7300. DiskValidator is not used in LocalDirAllocator. (Szilard Nemeth via Haibo Chen)

Posted by in...@apache.org.
YARN-7300. DiskValidator is not used in LocalDirAllocator. (Szilard Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6873dfd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6873dfd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6873dfd

Branch: refs/heads/HADOOP-15461
Commit: e6873dfde057e63ce5efa91f3061db3ee1b2e236
Parents: f354f47
Author: Haibo Chen <ha...@apache.org>
Authored: Thu Jul 19 16:27:11 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Thu Jul 19 16:27:11 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/LocalDirAllocator.java | 28 +++++++++++++++-----
 .../nodemanager/LocalDirsHandlerService.java    | 27 ++++++++++++++-----
 2 files changed, 42 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6873dfd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
index 1c216f4..a4b158a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
@@ -24,8 +24,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.util.*;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -78,11 +76,25 @@ public class LocalDirAllocator {
   /** Used when size of file to be allocated is unknown. */
   public static final int SIZE_UNKNOWN = -1;
 
+  private final DiskValidator diskValidator;
+
   /**Create an allocator object
    * @param contextCfgItemName
    */
   public LocalDirAllocator(String contextCfgItemName) {
     this.contextCfgItemName = contextCfgItemName;
+    try {
+      this.diskValidator = DiskValidatorFactory.getInstance(
+              BasicDiskValidator.NAME);
+    } catch (DiskErrorException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  public LocalDirAllocator(String contextCfgItemName,
+          DiskValidator diskValidator) {
+    this.contextCfgItemName = contextCfgItemName;
+    this.diskValidator = diskValidator;
   }
   
   /** This method must be used to obtain the dir allocation context for a 
@@ -96,7 +108,8 @@ public class LocalDirAllocator {
       AllocatorPerContext l = contexts.get(contextCfgItemName);
       if (l == null) {
         contexts.put(contextCfgItemName, 
-                    (l = new AllocatorPerContext(contextCfgItemName)));
+                    (l = new AllocatorPerContext(contextCfgItemName,
+                            diskValidator)));
       }
       return l;
     }
@@ -255,6 +268,7 @@ public class LocalDirAllocator {
     // NOTE: the context must be accessed via a local reference as it
     //       may be updated at any time to reference a different context
     private AtomicReference<Context> currentContext;
+    private final DiskValidator diskValidator;
 
     private static class Context {
       private AtomicInteger dirNumLastAccessed = new AtomicInteger(0);
@@ -280,9 +294,11 @@ public class LocalDirAllocator {
       }
     }
 
-    public AllocatorPerContext(String contextCfgItemName) {
+    public AllocatorPerContext(String contextCfgItemName,
+            DiskValidator diskValidator) {
       this.contextCfgItemName = contextCfgItemName;
       this.currentContext = new AtomicReference<Context>(new Context());
+      this.diskValidator = diskValidator;
     }
 
     /** This method gets called everytime before any read/write to make sure
@@ -312,7 +328,7 @@ public class LocalDirAllocator {
                     ? new File(ctx.localFS.makeQualified(tmpDir).toUri())
                     : new File(dirStrings[i]);
 
-                DiskChecker.checkDir(tmpFile);
+                diskValidator.checkStatus(tmpFile);
                 dirs.add(new Path(tmpFile.getPath()));
                 dfList.add(new DF(tmpFile, 30000));
               } catch (DiskErrorException de) {
@@ -348,7 +364,7 @@ public class LocalDirAllocator {
         //check whether we are able to create a directory here. If the disk
         //happens to be RDONLY we will fail
         try {
-          DiskChecker.checkDir(new File(file.getParent().toUri().getPath()));
+          diskValidator.checkStatus(new File(file.getParent().toUri().getPath()));
           return file;
         } catch (DiskErrorException d) {
           LOG.warn("Disk Error Exception: ", d);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6873dfd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
index 621cabc..6eabd0d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
@@ -27,6 +27,9 @@ import java.util.List;
 import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
+import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.DiskValidator;
+import org.apache.hadoop.util.DiskValidatorFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -155,13 +158,23 @@ public class LocalDirsHandlerService extends AbstractService {
       String local = conf.get(YarnConfiguration.NM_LOCAL_DIRS);
       conf.set(NM_GOOD_LOCAL_DIRS,
           (local != null) ? local : "");
-      localDirsAllocator = new LocalDirAllocator(
-          NM_GOOD_LOCAL_DIRS);
-      String log = conf.get(YarnConfiguration.NM_LOG_DIRS);
-      conf.set(NM_GOOD_LOG_DIRS,
-          (log != null) ? log : "");
-      logDirsAllocator = new LocalDirAllocator(
-          NM_GOOD_LOG_DIRS);
+      String diskValidatorName = conf.get(YarnConfiguration.DISK_VALIDATOR,
+              YarnConfiguration.DEFAULT_DISK_VALIDATOR);
+      try {
+        DiskValidator diskValidator =
+            DiskValidatorFactory.getInstance(diskValidatorName);
+        localDirsAllocator = new LocalDirAllocator(
+                NM_GOOD_LOCAL_DIRS, diskValidator);
+        String log = conf.get(YarnConfiguration.NM_LOG_DIRS);
+        conf.set(NM_GOOD_LOG_DIRS,
+                (log != null) ? log : "");
+        logDirsAllocator = new LocalDirAllocator(
+                NM_GOOD_LOG_DIRS, diskValidator);
+      } catch (DiskErrorException e) {
+        throw new YarnRuntimeException(
+            "Failed to create DiskValidator of type " + diskValidatorName + "!",
+            e);
+      }
     }
 
     @Override
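
A minimal sketch of how a caller can wire a pluggable DiskValidator into the new two-argument LocalDirAllocator constructor shown above. The context property name "test.local.dirs" and the /tmp directories are made up for illustration; the validator lookup mirrors what LocalDirsHandlerService now does, falling back to the "basic" validator.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.DiskValidator;
import org.apache.hadoop.util.DiskValidatorFactory;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class LocalDirAllocatorWithValidatorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();

    // Resolve the validator implementation by name, as the NodeManager's
    // LocalDirsHandlerService does after this change ("basic" by default).
    String validatorName = conf.get(YarnConfiguration.DISK_VALIDATOR,
        YarnConfiguration.DEFAULT_DISK_VALIDATOR);
    DiskValidator validator = DiskValidatorFactory.getInstance(validatorName);

    // "test.local.dirs" is a hypothetical context property holding a
    // comma-separated list of local directories.
    conf.set("test.local.dirs", "/tmp/dir1,/tmp/dir2");
    LocalDirAllocator allocator =
        new LocalDirAllocator("test.local.dirs", validator);

    // Path requests now run validator.checkStatus(...) on the candidate
    // directories instead of the hard-coded DiskChecker.checkDir(...).
    Path scratch = allocator.getLocalPathForWrite("scratch/file.tmp", conf);
    System.out.println("Allocated " + scratch);
  }
}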


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[48/50] hadoop git commit: HADOOP-15465. Deprecate WinUtils#Symlinks by using native java code. Contributed by Giovanni Matteo Fumarola.

Posted by in...@apache.org.
HADOOP-15465. Deprecate WinUtils#Symlinks by using native java code. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8d2b091
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8d2b091
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8d2b091

Branch: refs/heads/HADOOP-15461
Commit: b8d2b09192ff21d4d6d8d861f295f6a0f4acc682
Parents: 849c45d
Author: Inigo Goiri <in...@apache.org>
Authored: Thu Jun 7 17:02:01 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue Jul 24 18:30:46 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/FileUtil.java     | 60 ++++++++------------
 .../apache/hadoop/fs/RawLocalFileSystem.java    |  2 -
 .../main/java/org/apache/hadoop/util/Shell.java |  9 ++-
 .../hadoop/yarn/server/MiniYARNCluster.java     | 13 ++---
 4 files changed, 37 insertions(+), 47 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8d2b091/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index df89598..61cb8d2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -34,8 +34,10 @@ import java.net.URI;
 import java.net.UnknownHostException;
 import java.nio.charset.Charset;
 import java.nio.file.AccessDeniedException;
+import java.nio.file.FileAlreadyExistsException;
 import java.nio.file.FileSystems;
 import java.nio.file.Files;
+import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Enumeration;
 import java.util.List;
@@ -1028,17 +1030,15 @@ public class FileUtil {
   }
 
   /**
-   * Create a soft link between a src and destination
-   * only on a local disk. HDFS does not support this.
-   * On Windows, when symlink creation fails due to security
-   * setting, we will log a warning. The return code in this
-   * case is 2.
+   * Create a soft link between a src and destination only on a local disk. On
+   * Windows, when symlink creation fails due to security setting, we will log a
+   * warning. The return code in this case is 2.
    *
    * @param target the target for symlink
    * @param linkname the symlink
    * @return 0 on success
    */
-  public static int symLink(String target, String linkname) throws IOException{
+  public static int symLink(String target, String linkname) throws IOException {
 
     if (target == null || linkname == null) {
       LOG.warn("Can not create a symLink with a target = " + target
@@ -1053,44 +1053,32 @@ public class FileUtil {
     File linkFile = new File(
         Path.getPathWithoutSchemeAndAuthority(new Path(linkname)).toString());
 
-    String[] cmd = Shell.getSymlinkCommand(
-        targetFile.toString(),
-        linkFile.toString());
-
-    ShellCommandExecutor shExec;
     try {
-      if (Shell.WINDOWS &&
-          linkFile.getParentFile() != null &&
-          !new Path(target).isAbsolute()) {
-        // Relative links on Windows must be resolvable at the time of
-        // creation. To ensure this we run the shell command in the directory
-        // of the link.
-        //
-        shExec = new ShellCommandExecutor(cmd, linkFile.getParentFile());
-      } else {
-        shExec = new ShellCommandExecutor(cmd);
-      }
-      shExec.execute();
-    } catch (Shell.ExitCodeException ec) {
-      int returnVal = ec.getExitCode();
-      if (Shell.WINDOWS && returnVal == SYMLINK_NO_PRIVILEGE) {
-        LOG.warn("Fail to create symbolic links on Windows. "
-            + "The default security settings in Windows disallow non-elevated "
-            + "administrators and all non-administrators from creating symbolic links. "
-            + "This behavior can be changed in the Local Security Policy management console");
-      } else if (returnVal != 0) {
-        LOG.warn("Command '" + StringUtils.join(" ", cmd) + "' failed "
-            + returnVal + " with: " + ec.getMessage());
-      }
-      return returnVal;
+      Files.createSymbolicLink(Paths.get(linkFile.toString()),
+          Paths.get(targetFile.toString()));
+    } catch (SecurityException e3) {
+      LOG.warn("Fail to create symbolic links on Windows. "
+          + "The default security settings in Windows disallow non-elevated "
+          + "administrators and all non-administrators from creating symbolic"
+          + " links. This behavior can be changed in the Local Security Policy"
+          + " management console");
+      return SYMLINK_NO_PRIVILEGE;
+
+    } catch (FileAlreadyExistsException | UnsupportedOperationException e) {
+      LOG.warn("Fail to create symbolic links. ErrorMessage = "
+          + e.getLocalizedMessage());
+      return 1;
+
     } catch (IOException e) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Error while create symlink " + linkname + " to " + target
             + "." + " Exception: " + StringUtils.stringifyException(e));
       }
       throw e;
+
     }
-    return shExec.getExitCode();
+
+    return 0;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8d2b091/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index bd003ae..8333366 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -925,7 +925,6 @@ public class RawLocalFileSystem extends FileSystem {
     return true;
   }
 
-  @SuppressWarnings("deprecation")
   @Override
   public void createSymlink(Path target, Path link, boolean createParent)
       throws IOException {
@@ -941,7 +940,6 @@ public class RawLocalFileSystem extends FileSystem {
       mkdirs(link.getParent());
     }
 
-    // NB: Use createSymbolicLink in java.nio.file.Path once available
     int result = FileUtil.symLink(target.toString(),
         makeAbsolute(link).toString());
     if (result != 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8d2b091/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index 46a0fcc..e902af0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -37,7 +37,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.security.alias.AbstractJavaKeyStoreProvider;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -297,7 +296,13 @@ public abstract class Shell {
         : new String[] { "chown", owner };
   }
 
-  /** Return a command to create symbolic links. */
+  /**
+   * Return a command to create symbolic links.
+   *
+   * Deprecated and likely to be deleted in the near future. Please use
+   * FileUtil.symlink().
+   */
+  @Deprecated
   public static String[] getSymlinkCommand(String target, String link) {
     return WINDOWS ?
        new String[] { getWinUtilsPath(), "symlink", link, target }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8d2b091/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index 0395138..01ac02c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -42,7 +43,6 @@ import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.conf.HAUtil;
@@ -202,14 +202,13 @@ public class MiniYARNCluster extends CompositeService {
       // Guarantee target exists before creating symlink.
       targetWorkDir.mkdirs();
 
-      ShellCommandExecutor shexec = new ShellCommandExecutor(
-        Shell.getSymlinkCommand(targetPath, linkPath));
       try {
-        shexec.execute();
+        FileUtil.symLink(targetPath, linkPath);
       } catch (IOException e) {
-        throw new YarnRuntimeException(String.format(
-          "failed to create symlink from %s to %s, shell output: %s", linkPath,
-          targetPath, shexec.getOutput()), e);
+        throw new YarnRuntimeException(
+            String.format("failed to create symlink from %s to %s.",
+                linkPath, targetPath),
+            e);
       }
 
       this.testWorkDir = link;
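
For context, a free-standing sketch of the java.nio.file call that now backs FileUtil.symLink(), with the same classes of failure the patch maps to warning logs and non-zero return codes. The /tmp paths are placeholders.

import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class NioSymlinkSketch {
  public static void main(String[] args) {
    // Placeholder locations; any writable directory works.
    Path target = Paths.get("/tmp/symlink-target");
    Path link = Paths.get("/tmp/symlink-name");
    try {
      // Replaces the former "winutils symlink" / "ln -s" shell-out.
      Files.createSymbolicLink(link, target);
      System.out.println("Created " + link + " -> " + target);
    } catch (FileAlreadyExistsException | UnsupportedOperationException e) {
      // FileUtil.symLink now logs these cases and returns 1.
      System.err.println("Could not create symlink: " + e);
    } catch (SecurityException e) {
      // Raised when a security manager denies link creation; FileUtil.symLink
      // treats this as the Windows "no symlink privilege" case (return 2).
      System.err.println("Not permitted to create symlinks: " + e);
    } catch (IOException e) {
      System.err.println("I/O failure creating symlink: " + e);
    }
  }
}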


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[09/50] hadoop git commit: YARN-8436. FSParentQueue: Comparison method violates its general contract. (Wilfred Spiegelenburg via Haibo Chen)

Posted by in...@apache.org.
YARN-8436. FSParentQueue: Comparison method violates its general contract. (Wilfred Spiegelenburg via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25648847
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25648847
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25648847

Branch: refs/heads/HADOOP-15461
Commit: 2564884757fbf4df7718f814cc448f7f23dad875
Parents: 45d9568
Author: Haibo Chen <ha...@apache.org>
Authored: Thu Jul 19 13:21:57 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Thu Jul 19 13:22:31 2018 -0700

----------------------------------------------------------------------
 .../scheduler/fair/FSParentQueue.java           | 30 +++-----
 .../scheduler/fair/FakeSchedulable.java         |  4 +
 .../TestDominantResourceFairnessPolicy.java     | 77 ++++++++++++++++++++
 3 files changed, 93 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25648847/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index 26c5630..d5df549 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
+import java.util.TreeSet;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -188,25 +188,19 @@ public class FSParentQueue extends FSQueue {
       return assigned;
     }
 
-    // Hold the write lock when sorting childQueues
-    writeLock.lock();
-    try {
-      Collections.sort(childQueues, policy.getComparator());
-    } finally {
-      writeLock.unlock();
-    }
-
-    /*
-     * We are releasing the lock between the sort and iteration of the
-     * "sorted" list. There could be changes to the list here:
-     * 1. Add a child queue to the end of the list, this doesn't affect
-     * container assignment.
-     * 2. Remove a child queue, this is probably good to take care of so we
-     * don't assign to a queue that is going to be removed shortly.
-     */
+    // Sort the queues while holding a read lock on this parent only.
+    // The individual entries are not locked and can change which means that
+    // the collection of childQueues can not be sorted by calling Sort().
+    // Locking each childqueue to prevent changes would have a large
+    // performance impact.
+    // We do not have to handle the queue removal case as a queue must be
+    // empty before removal. Assigning an application to a queue and removal of
+    // that queue both need the scheduler lock.
+    TreeSet<FSQueue> sortedChildQueues = new TreeSet<>(policy.getComparator());
     readLock.lock();
     try {
-      for (FSQueue child : childQueues) {
+      sortedChildQueues.addAll(childQueues);
+      for (FSQueue child : sortedChildQueues) {
         assigned = child.assignContainer(node);
         if (!Resources.equals(assigned, Resources.none())) {
           break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25648847/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java
index 03332b2..01eec73 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java
@@ -143,4 +143,8 @@ public class FakeSchedulable implements Schedulable {
   public boolean isPreemptable() {
     return true;
   }
+
+  public void setResourceUsage(Resource usage) {
+    this.usage = usage;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25648847/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
index 03fd1ef..55b7163 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
@@ -19,11 +19,16 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Comparator;
+import java.util.List;
 import java.util.Map;
+import java.util.TreeSet;
 
 import org.apache.curator.shaded.com.google.common.base.Joiner;
 import org.apache.hadoop.conf.Configuration;
@@ -443,4 +448,76 @@ public class TestDominantResourceFairnessPolicy {
     conf.set(YarnConfiguration.RESOURCE_TYPES, Joiner.on(',').join(resources));
     ResourceUtils.resetResourceTypes(conf);
   }
+
+  @Test
+  public void testModWhileSorting(){
+    final List<FakeSchedulable> schedulableList = new ArrayList<>();
+    for (int i=0; i<10000; i++) {
+      schedulableList.add(
+          (FakeSchedulable)createSchedulable((i%10)*100, (i%3)*2));
+    }
+    Comparator DRFComparator = createComparator(100000, 50000);
+
+    // To simulate unallocated resource changes
+    Thread modThread = modificationThread(schedulableList);
+    modThread.start();
+
+    // This should fail: make sure that we do test correctly
+    // TimSort which is used does not handle the concurrent modification of
+    // objects it is sorting.
+    try {
+      Collections.sort(schedulableList, DRFComparator);
+      fail("Sorting should have failed and did not");
+    } catch (IllegalArgumentException iae) {
+      assertEquals(iae.getMessage(), "Comparison method violates its general contract!");
+    }
+    try {
+      modThread.join();
+    } catch (InterruptedException ie) {
+      fail("ModThread join failed: " + ie.getMessage());
+    }
+
+    // clean up and try again using TreeSet which should work
+    schedulableList.clear();
+    for (int i=0; i<10000; i++) {
+      schedulableList.add(
+          (FakeSchedulable)createSchedulable((i%10)*100, (i%3)*2));
+    }
+    TreeSet<Schedulable> sortedSchedulable = new TreeSet<>(DRFComparator);
+    modThread = modificationThread(schedulableList);
+    modThread.start();
+    sortedSchedulable.addAll(schedulableList);
+    try {
+      modThread.join();
+    } catch (InterruptedException ie) {
+      fail("ModThread join failed: " + ie.getMessage());
+    }
+  }
+
+  /**
+   * Thread to simulate concurrent schedulable changes while sorting
+   */
+  private Thread modificationThread(final List<FakeSchedulable> schedulableList) {
+    Thread modThread  = new Thread() {
+      @Override
+      public void run() {
+        try {
+          // This sleep is needed to make sure the sort has started before the
+          // modifications start and finish
+          Thread.sleep(500);
+        } catch (InterruptedException ie) {
+          fail("Modification thread interrupted while asleep " +
+              ie.getMessage());
+        }
+        Resource newUsage = Resources.createResource(0, 0);
+        for (int j = 0; j < 1000; j++) {
+          FakeSchedulable sched = schedulableList.get(j * 10);
+          newUsage.setMemorySize(20000);
+          newUsage.setVirtualCores(j % 10);
+          sched.setResourceUsage(newUsage);
+        }
+      }
+    };
+    return modThread;
+  }
 }
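
A small, self-contained illustration (not RM code) of why the patch swaps Collections.sort for a TreeSet copy: TimSort re-checks orderings it has already established, so a comparator whose inputs mutate mid-sort can throw "Comparison method violates its general contract!", whereas building a fresh TreeSet only compares elements at insertion time and never re-validates. The Item class and its usage field are invented for this demo.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicLong;

public class SortUnderMutationSketch {

  // Toy stand-in for a Schedulable whose resource usage changes concurrently.
  static class Item {
    final AtomicLong usage = new AtomicLong();
    Item(long u) { usage.set(u); }
  }

  public static void main(String[] args) throws Exception {
    List<Item> items = new ArrayList<>();
    for (int i = 0; i < 100_000; i++) {
      items.add(new Item(i % 1000));
    }
    Comparator<Item> byUsage = Comparator.comparingLong(it -> it.usage.get());

    // Mutate usages while the sort runs, as container allocations would.
    Thread mutator = new Thread(() -> {
      for (int j = 0; j < 1_000_000; j++) {
        items.get(j % items.size()).usage.set(j);
      }
    });
    mutator.start();

    try {
      // TimSort may detect the inconsistent ordering and throw.
      items.sort(byUsage);
      System.out.println("Sort happened to finish without an error");
    } catch (IllegalArgumentException e) {
      System.out.println("Collections.sort failed: " + e.getMessage());
    }

    // A TreeSet orders elements as they are inserted and never re-validates
    // earlier comparisons, so copying a snapshot into it does not throw even
    // while usages keep changing (ties are dropped as duplicates).
    TreeSet<Item> snapshot = new TreeSet<>(byUsage);
    snapshot.addAll(items);
    mutator.join();
    System.out.println("TreeSet snapshot holds " + snapshot.size() + " items");
  }
}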


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[44/50] hadoop git commit: YARN-8541. RM startup failure on recovery after user deletion. Contributed by Bibin A Chundatt.

Posted by in...@apache.org.
YARN-8541. RM startup failure on recovery after user deletion. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e673dd1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e673dd1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e673dd1d

Branch: refs/heads/HADOOP-15461
Commit: e673dd1d4d78b66e7b6705ec6dc3679d2347d704
Parents: cd0b9f1
Author: bibinchundatt <bi...@apache.org>
Authored: Tue Jul 24 18:36:49 2018 +0530
Committer: bibinchundatt <bi...@apache.org>
Committed: Tue Jul 24 23:26:59 2018 +0530

----------------------------------------------------------------------
 .../server/resourcemanager/RMAppManager.java    | 48 ++++++++++----------
 .../placement/PlacementManager.java             |  9 ----
 .../TestWorkPreservingRMRestart.java            | 48 ++++++++++++++++++++
 .../placement/TestPlacementManager.java         | 20 ++++----
 4 files changed, 80 insertions(+), 45 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e673dd1d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 3e64cfc..7011aaa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -364,17 +364,9 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
       ApplicationSubmissionContext submissionContext, long submitTime,
       String user, boolean isRecovery, long startTime) throws YarnException {
 
-    ApplicationPlacementContext placementContext = null;
-    try {
-      placementContext = placeApplication(rmContext, submissionContext, user);
-    } catch (YarnException e) {
-      String msg =
-          "Failed to place application " + submissionContext.getApplicationId()
-              + " to queue and specified " + "queue is invalid : "
-              + submissionContext.getQueue();
-      LOG.error(msg, e);
-      throw e;
-    }
+    ApplicationPlacementContext placementContext =
+        placeApplication(rmContext.getQueuePlacementManager(),
+            submissionContext, user, isRecovery);
 
     // We only replace the queue when it's a new application
     if (!isRecovery) {
@@ -789,23 +781,31 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
   }
 
   @VisibleForTesting
-  ApplicationPlacementContext placeApplication(RMContext rmContext,
-      ApplicationSubmissionContext context, String user) throws YarnException {
+  ApplicationPlacementContext placeApplication(
+      PlacementManager placementManager, ApplicationSubmissionContext context,
+      String user, boolean isRecovery) throws YarnException {
     ApplicationPlacementContext placementContext = null;
-    PlacementManager placementManager = rmContext.getQueuePlacementManager();
-
     if (placementManager != null) {
-      placementContext = placementManager.placeApplication(context, user);
-    } else{
-      if ( context.getQueue() == null || context.getQueue().isEmpty()) {
-        final String msg = "Queue Placement Manager is not set. Cannot place "
-            + "application : " + context.getApplicationId() + " to queue and "
-            + "specified queue is invalid " + context.getQueue();
-        LOG.error(msg);
-        throw new YarnException(msg);
+      try {
+        placementContext = placementManager.placeApplication(context, user);
+      } catch (YarnException e) {
+        // Placement could also fail if the user doesn't exist in system
+        // skip if the user is not found during recovery.
+        if (isRecovery) {
+          LOG.warn("PlaceApplication failed,skipping on recovery of rm");
+          return placementContext;
+        }
+        throw e;
       }
     }
-
+    if (placementContext == null && (context.getQueue() == null) || context
+        .getQueue().isEmpty()) {
+      String msg = "Failed to place application " + context.getApplicationId()
+          + " to queue and specified " + "queue is invalid : " + context
+          .getQueue();
+      LOG.error(msg);
+      throw new YarnException(msg);
+    }
     return placementContext;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e673dd1d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementManager.java
index 5fa7723..74cf7ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementManager.java
@@ -70,15 +70,6 @@ public class PlacementManager {
         }
       }
 
-      // Failed to get where to place application
-      if (null == placement && null == asc.getQueue()) {
-        String msg = "Failed to place application " +
-            asc.getApplicationId() + " to queue and specified "
-            + "queue is invalid : " + asc.getQueue();
-        LOG.error(msg);
-        throw new YarnException(msg);
-      }
-
       return placement;
     } finally {
       readLock.unlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e673dd1d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index 88c19a1..a821b0a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -39,8 +39,12 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart.TestSecurityMockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.placement
+    .ApplicationPlacementContext;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData;
@@ -105,6 +109,8 @@ import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
 import static org.apache.hadoop.yarn.server.resourcemanager.webapp
     .RMWebServices.DEFAULT_QUEUE;
 import static org.junit.Assert.*;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -1555,6 +1561,48 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
   }
 
   @Test(timeout = 30000)
+  public void testUnknownUserOnRecovery() throws Exception {
+
+    MockRM rm1 = new MockRM(conf);
+    rm1.start();
+    MockMemoryRMStateStore memStore =
+        (MockMemoryRMStateStore) rm1.getRMStateStore();
+    MockNM nm1 =
+        new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
+    nm1.registerNode();
+
+    // create app and launch the UAM
+    RMApp app0 = rm1.submitApp(200, true);
+    MockAM am0 = MockRM.launchUAM(app0, rm1, nm1);
+    am0.registerAppAttempt();
+    rm1.killApp(app0.getApplicationId());
+    PlacementManager placementMgr = mock(PlacementManager.class);
+    doThrow(new YarnException("No groups for user")).when(placementMgr)
+        .placeApplication(any(ApplicationSubmissionContext.class),
+            any(String.class));
+    MockRM rm2 = new MockRM(conf, memStore) {
+      @Override
+      protected RMAppManager createRMAppManager() {
+        return new RMAppManager(this.rmContext, this.scheduler,
+            this.masterService, this.applicationACLsManager, conf) {
+          @Override
+          ApplicationPlacementContext placeApplication(
+              PlacementManager placementManager,
+              ApplicationSubmissionContext context, String user,
+              boolean isRecovery) throws YarnException {
+            return super
+                .placeApplication(placementMgr, context, user, isRecovery);
+          }
+        };
+      }
+    };
+    rm2.start();
+    RMApp recoveredApp =
+        rm2.getRMContext().getRMApps().get(app0.getApplicationId());
+    Assert.assertEquals(RMAppState.KILLED, recoveredApp.getState());
+  }
+
+  @Test(timeout = 30000)
   public void testDynamicAutoCreatedQueueRecoveryWithDefaultQueue()
       throws Exception {
     //if queue name is not specified, it should submit to 'default' queue

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e673dd1d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java
index 13111be..db5cd60 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java
@@ -83,16 +83,11 @@ public class TestPlacementManager {
 
     ApplicationSubmissionContext asc = Records.newRecord(
         ApplicationSubmissionContext.class);
+    asc.setQueue(YarnConfiguration.DEFAULT_QUEUE_NAME);
     asc.setApplicationName(APP_NAME);
 
-    boolean caughtException = false;
-    try{
-      pm.placeApplication(asc, USER2);
-    } catch (Exception e) {
-      caughtException = true;
-    }
-    Assert.assertTrue(caughtException);
-
+    Assert.assertNull("Placement should be null",
+        pm.placeApplication(asc, USER2));
     QueueMappingEntity queueMappingEntity = new QueueMappingEntity(APP_NAME,
         USER1, PARENT_QUEUE);
 
@@ -100,12 +95,13 @@ public class TestPlacementManager {
         Arrays.asList(queueMappingEntity));
     queuePlacementRules.add(anRule);
     pm.updateRules(queuePlacementRules);
-    try{
-      pm.placeApplication(asc, USER2);
+    try {
+      ApplicationPlacementContext pc = pm.placeApplication(asc, USER2);
+      Assert.assertNotNull(pc);
     } catch (Exception e) {
-      caughtException = false;
+      e.printStackTrace();
+      Assert.fail("Exception not expected");
     }
-    Assert.assertFalse(caughtException);
   }
 
 }
\ No newline at end of file
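
The essence of the fix, restated as a free-standing sketch: a placement failure (for example a deleted user with no groups) is tolerated during recovery so the RM can finish startup, while a live submission still fails fast. The Placer interface and method names below are simplified stand-ins, not the actual RMAppManager signatures.

public class PlacementOnRecoverySketch {

  // Hypothetical stand-in for PlacementManager.placeApplication().
  interface Placer {
    String place(String appId, String user) throws Exception;
  }

  static String placeOrSkip(Placer placer, String appId, String user,
      boolean isRecovery) throws Exception {
    try {
      return placer.place(appId, user);
    } catch (Exception e) {
      if (isRecovery) {
        // Placement can fail when the user no longer exists on the system;
        // during recovery we log and continue instead of failing RM startup.
        System.err.println("Placement failed for " + appId
            + " during recovery, skipping: " + e.getMessage());
        return null;
      }
      throw e; // live submissions are still rejected
    }
  }
}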


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[04/50] hadoop git commit: HDDS-255. Fix TestOzoneConfigurationFields for missing hdds.command.status.report.interval in config classes. Contributed by Sandeep Nemuri.

Posted by in...@apache.org.
HDDS-255. Fix TestOzoneConfigurationFields for missing hdds.command.status.report.interval in config classes. Contributed by Sandeep Nemuri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c492eacc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c492eacc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c492eacc

Branch: refs/heads/HADOOP-15461
Commit: c492eaccc21bb53d0d40214290b2fa9c493e2955
Parents: 129269f
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed Jul 18 11:46:26 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Wed Jul 18 11:46:26 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/ozone/TestOzoneConfigurationFields.java     | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c492eacc/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 717bb68..909cddf 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone;
 
 import org.apache.hadoop.conf.TestConfigurationFieldsBase;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 
@@ -31,7 +32,7 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase {
     xmlFilename = new String("ozone-default.xml");
     configurationClasses =
         new Class[] {OzoneConfigKeys.class, ScmConfigKeys.class,
-            OMConfigKeys.class};
+            OMConfigKeys.class, HddsConfigKeys.class};
     errorIfMissingConfigProps = true;
     errorIfMissingXmlProps = true;
     xmlPropsToSkipCompare.add("hadoop.tags.custom");


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[47/50] hadoop git commit: HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica (Contributed by BELUGA BEHR via Daniel Templeton)

Posted by in...@apache.org.
HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica
(Contributed by BELUGA BEHR via Daniel Templeton)

Change-Id: I965d1cfa642ad24296038b83e3d5c9983545267d


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/849c45db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/849c45db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/849c45db

Branch: refs/heads/HADOOP-15461
Commit: 849c45db187224095b13fe297a4d7377fbb9d2cd
Parents: 6bec03c
Author: Daniel Templeton <te...@apache.org>
Authored: Tue Jul 24 15:34:19 2018 -0700
Committer: Daniel Templeton <te...@apache.org>
Committed: Tue Jul 24 16:05:27 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/CreateFlag.java   |  9 ++-
 .../org/apache/hadoop/hdfs/AddBlockFlag.java    | 11 ++-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  3 +
 .../hadoop/hdfs/DistributedFileSystem.java      | 11 +++
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 .../BlockPlacementPolicyDefault.java            |  4 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 30 +++++---
 .../server/namenode/TestFSDirWriteFileOp.java   | 79 ++++++++++++++++++++
 8 files changed, 134 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
index 383d65a..c3e088b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
@@ -116,7 +116,14 @@ public enum CreateFlag {
    * Enforce the file to be a replicated file, no matter what its parent
    * directory's replication or erasure coding policy is.
    */
-  SHOULD_REPLICATE((short) 0x80);
+  SHOULD_REPLICATE((short) 0x80),
+
+  /**
+   * Advise that the first block replica NOT take into account DataNode
+   * locality. The first block replica should be placed randomly within the
+   * cluster. Subsequent block replicas should follow DataNode locality rules.
+   */
+  IGNORE_CLIENT_LOCALITY((short) 0x100);
 
   private final short mode;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
index 6a0805b..b0686d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
@@ -36,7 +36,16 @@ public enum AddBlockFlag {
    *
    * @see CreateFlag#NO_LOCAL_WRITE
    */
-  NO_LOCAL_WRITE((short) 0x01);
+  NO_LOCAL_WRITE((short) 0x01),
+
+  /**
+   * Advise that the first block replica NOT take into account DataNode
+   * locality. The first block replica should be placed randomly within the
+   * cluster. Subsequent block replicas should follow DataNode locality rules.
+   *
+   * @see CreateFlag#IGNORE_CLIENT_LOCALITY
+   */
+  IGNORE_CLIENT_LOCALITY((short) 0x02);
 
   private final short mode;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 9734752..e977054 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -201,6 +201,9 @@ public class DFSOutputStream extends FSOutputSummer
     if (flag.contains(CreateFlag.NO_LOCAL_WRITE)) {
       this.addBlockFlags.add(AddBlockFlag.NO_LOCAL_WRITE);
     }
+    if (flag.contains(CreateFlag.IGNORE_CLIENT_LOCALITY)) {
+      this.addBlockFlags.add(AddBlockFlag.IGNORE_CLIENT_LOCALITY);
+    }
     if (progress != null) {
       DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream "
           +"{}", src);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 82cdd8c..3519c60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -3205,6 +3205,17 @@ public class DistributedFileSystem extends FileSystem
       return this;
     }
 
+    /**
+     * Advise that the first block replica be written without regard to the
+     * client locality.
+     *
+     * @see CreateFlag for the details.
+     */
+    public HdfsDataOutputStreamBuilder ignoreClientLocality() {
+      getFlags().add(CreateFlag.IGNORE_CLIENT_LOCALITY);
+      return this;
+    }
+
     @VisibleForTesting
     @Override
     protected EnumSet<CreateFlag> getFlags() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 0f5ce94..e51aeda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -167,6 +167,7 @@ message AbandonBlockResponseProto { // void response
 
 enum AddBlockFlagProto {
   NO_LOCAL_WRITE = 1; // avoid writing to local node.
+  IGNORE_CLIENT_LOCALITY = 2; // write to a random node
 }
 
 message AddBlockRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index c94232f..6985f55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -280,7 +280,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     if (avoidLocalNode) {
       results = new ArrayList<>(chosenStorage);
       Set<Node> excludedNodeCopy = new HashSet<>(excludedNodes);
-      excludedNodeCopy.add(writer);
+      if (writer != null) {
+        excludedNodeCopy.add(writer);
+      }
       localNode = chooseTarget(numOfReplicas, writer,
           excludedNodeCopy, blocksize, maxNodesPerRack, results,
           avoidStaleNodes, storagePolicy,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 03c349c..2875708 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -269,19 +269,27 @@ class FSDirWriteFileOp {
       BlockManager bm, String src, DatanodeInfo[] excludedNodes,
       String[] favoredNodes, EnumSet<AddBlockFlag> flags,
       ValidateAddBlockResult r) throws IOException {
-    Node clientNode = bm.getDatanodeManager()
-        .getDatanodeByHost(r.clientMachine);
-    if (clientNode == null) {
-      clientNode = getClientNode(bm, r.clientMachine);
-    }
+    Node clientNode = null;
 
-    Set<Node> excludedNodesSet = null;
-    if (excludedNodes != null) {
-      excludedNodesSet = new HashSet<>(excludedNodes.length);
-      Collections.addAll(excludedNodesSet, excludedNodes);
+    boolean ignoreClientLocality = (flags != null
+            && flags.contains(AddBlockFlag.IGNORE_CLIENT_LOCALITY));
+
+    // If client locality is ignored, clientNode remains 'null' to indicate
+    if (!ignoreClientLocality) {
+      clientNode = bm.getDatanodeManager().getDatanodeByHost(r.clientMachine);
+      if (clientNode == null) {
+        clientNode = getClientNode(bm, r.clientMachine);
+      }
     }
-    List<String> favoredNodesList = (favoredNodes == null) ? null
-        : Arrays.asList(favoredNodes);
+
+    Set<Node> excludedNodesSet =
+        (excludedNodes == null) ? new HashSet<>()
+            : new HashSet<>(Arrays.asList(excludedNodes));
+
+    List<String> favoredNodesList =
+        (favoredNodes == null) ? Collections.emptyList()
+            : Arrays.asList(favoredNodes);
+
     // choose targets for the new block to be allocated.
     return bm.chooseTarget4NewBlock(src, r.numTargets, clientNode,
                                     excludedNodesSet, r.blockSize,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirWriteFileOp.java
new file mode 100644
index 0000000..762fa61
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirWriteFileOp.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.assertNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyByte;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Matchers.anyList;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anySet;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.hdfs.AddBlockFlag;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.ValidateAddBlockResult;
+import org.apache.hadoop.net.Node;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+
+public class TestFSDirWriteFileOp {
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testIgnoreClientLocality() throws IOException {
+    ValidateAddBlockResult addBlockResult =
+        new ValidateAddBlockResult(1024L, 3, (byte) 0x01, null, null, null);
+
+    EnumSet<AddBlockFlag> addBlockFlags =
+        EnumSet.of(AddBlockFlag.IGNORE_CLIENT_LOCALITY);
+
+    BlockManager bmMock = mock(BlockManager.class);
+
+    ArgumentCaptor<Node> nodeCaptor = ArgumentCaptor.forClass(Node.class);
+
+    when(bmMock.chooseTarget4NewBlock(anyString(), anyInt(), any(), anySet(),
+        anyLong(), anyList(), anyByte(), any(), any(), any())).thenReturn(null);
+
+    FSDirWriteFileOp.chooseTargetForNewBlock(bmMock, "localhost", null, null,
+        addBlockFlags, addBlockResult);
+
+    // There should be no other interactions with the block manager when the
+    // IGNORE_CLIENT_LOCALITY is passed in because there is no need to discover
+    // the local node requesting the new block
+    verify(bmMock, times(1)).chooseTarget4NewBlock(anyString(), anyInt(),
+        nodeCaptor.capture(), anySet(), anyLong(), anyList(), anyByte(), any(),
+        any(), any());
+
+    verifyNoMoreInteractions(bmMock);
+
+    assertNull(
+        "Source node was assigned a value. Expected 'null' value because "
+            + "chooseTarget was flagged to ignore source node locality",
+        nodeCaptor.getValue());
+  }
+}




[11/50] hadoop git commit: YARN-6995. Improve use of ResourceNotFoundException in resource types code. (Daniel Templeton and Szilard Nemeth via Haibo Chen)

Posted by in...@apache.org.
YARN-6995. Improve use of ResourceNotFoundException in resource types code. (Daniel Templeton and Szilard Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f354f47f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f354f47f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f354f47f

Branch: refs/heads/HADOOP-15461
Commit: f354f47f9959d8a79baee690858af3e160494c32
Parents: b3b4d4c
Author: Haibo Chen <ha...@apache.org>
Authored: Thu Jul 19 15:34:12 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Thu Jul 19 15:35:05 2018 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/api/records/Resource.java       | 22 ++++-----------
 .../exceptions/ResourceNotFoundException.java   | 29 +++++++++++++++-----
 .../api/records/impl/pb/ResourcePBImpl.java     | 10 +++----
 .../hadoop/yarn/util/resource/Resources.java    |  6 ++--
 4 files changed, 34 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f354f47f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 3cac1d1..1a7252d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -257,18 +257,15 @@ public abstract class Resource implements Comparable<Resource> {
    *
    * @param resource name of the resource
    * @return the ResourceInformation object for the resource
-   * @throws ResourceNotFoundException if the resource can't be found
    */
   @Public
   @InterfaceStability.Unstable
-  public ResourceInformation getResourceInformation(String resource)
-      throws ResourceNotFoundException {
+  public ResourceInformation getResourceInformation(String resource) {
     Integer index = ResourceUtils.getResourceTypeIndex().get(resource);
     if (index != null) {
       return resources[index];
     }
-    throw new ResourceNotFoundException("Unknown resource '" + resource
-        + "'. Known resources are " + Arrays.toString(resources));
+    throw new ResourceNotFoundException(this, resource);
   }
 
   /**
@@ -299,12 +296,10 @@ public abstract class Resource implements Comparable<Resource> {
    *
    * @param resource name of the resource
    * @return the value for the resource
-   * @throws ResourceNotFoundException if the resource can't be found
    */
   @Public
   @InterfaceStability.Unstable
-  public long getResourceValue(String resource)
-      throws ResourceNotFoundException {
+  public long getResourceValue(String resource) {
     return getResourceInformation(resource).getValue();
   }
 
@@ -313,13 +308,11 @@ public abstract class Resource implements Comparable<Resource> {
    *
    * @param resource the resource for which the ResourceInformation is provided
    * @param resourceInformation ResourceInformation object
-   * @throws ResourceNotFoundException if the resource is not found
    */
   @Public
   @InterfaceStability.Unstable
   public void setResourceInformation(String resource,
-      ResourceInformation resourceInformation)
-      throws ResourceNotFoundException {
+      ResourceInformation resourceInformation) {
     if (resource.equals(ResourceInformation.MEMORY_URI)) {
       this.setMemorySize(resourceInformation.getValue());
       return;
@@ -348,8 +341,7 @@ public abstract class Resource implements Comparable<Resource> {
       ResourceInformation resourceInformation)
       throws ResourceNotFoundException {
     if (index < 0 || index >= resources.length) {
-      throw new ResourceNotFoundException("Unknown resource at index '" + index
-          + "'. Valid resources are " + Arrays.toString(resources));
+      throwExceptionWhenArrayOutOfBound(index);
     }
     ResourceInformation.copy(resourceInformation, resources[index]);
   }
@@ -360,12 +352,10 @@ public abstract class Resource implements Comparable<Resource> {
    *
    * @param resource the resource for which the value is provided.
    * @param value    the value to set
-   * @throws ResourceNotFoundException if the resource is not found
    */
   @Public
   @InterfaceStability.Unstable
-  public void setResourceValue(String resource, long value)
-      throws ResourceNotFoundException {
+  public void setResourceValue(String resource, long value) {
     if (resource.equals(ResourceInformation.MEMORY_URI)) {
       this.setMemorySize(value);
       return;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f354f47f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceNotFoundException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceNotFoundException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceNotFoundException.java
index b5fece7..3fddcff 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceNotFoundException.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceNotFoundException.java
@@ -18,8 +18,10 @@
 
 package org.apache.hadoop.yarn.exceptions;
 
+import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.Resource;
 
 /**
  * This exception is thrown when details of an unknown resource type
@@ -28,18 +30,31 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public class ResourceNotFoundException extends YarnRuntimeException {
-
   private static final long serialVersionUID = 10081982L;
+  private static final String MESSAGE = "The resource manager encountered a "
+      + "problem that should not occur under normal circumstances. "
+      + "Please report this error to the Hadoop community by opening a "
+      + "JIRA ticket at http://issues.apache.org/jira and including the "
+      + "following information:%n* Resource type requested: %s%n* Resource "
+      + "object: %s%n* The stack trace for this exception: %s%n"
+      + "After encountering this error, the resource manager is "
+      + "in an inconsistent state. It is safe for the resource manager "
+      + "to be restarted as the error encountered should be transitive. "
+      + "If high availability is enabled, failing over to "
+      + "a standby resource manager is also safe.";
 
-  public ResourceNotFoundException(String message) {
-    super(message);
+  public ResourceNotFoundException(Resource resource, String type) {
+    this(String.format(MESSAGE, type, resource,
+        ExceptionUtils.getStackTrace(new Exception())));
   }
 
-  public ResourceNotFoundException(Throwable cause) {
-    super(cause);
+  public ResourceNotFoundException(Resource resource, String type,
+      Throwable cause) {
+    super(String.format(MESSAGE, type, resource,
+        ExceptionUtils.getStackTrace(cause)), cause);
   }
 
-  public ResourceNotFoundException(String message, Throwable cause) {
-    super(message, cause);
+  public ResourceNotFoundException(String message) {
+    super(message);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f354f47f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 6ebed6e..15d2470 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -193,8 +193,7 @@ public class ResourcePBImpl extends Resource {
   }
 
   @Override
-  public void setResourceValue(String resource, long value)
-      throws ResourceNotFoundException {
+  public void setResourceValue(String resource, long value) {
     maybeInitBuilder();
     if (resource == null) {
       throw new IllegalArgumentException("resource type object cannot be null");
@@ -203,14 +202,13 @@ public class ResourcePBImpl extends Resource {
   }
 
   @Override
-  public ResourceInformation getResourceInformation(String resource)
-      throws ResourceNotFoundException {
+  public ResourceInformation getResourceInformation(String resource) {
+    initResources();
     return super.getResourceInformation(resource);
   }
 
   @Override
-  public long getResourceValue(String resource)
-      throws ResourceNotFoundException {
+  public long getResourceValue(String resource) {
     return super.getResourceValue(resource);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f354f47f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index ace8b5d..db0f980 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -128,14 +128,12 @@ public class Resources {
 
     @Override
     public void setResourceInformation(String resource,
-        ResourceInformation resourceInformation)
-        throws ResourceNotFoundException {
+        ResourceInformation resourceInformation) {
       throw new RuntimeException(name + " cannot be modified!");
     }
 
     @Override
-    public void setResourceValue(String resource, long value)
-        throws ResourceNotFoundException {
+    public void setResourceValue(String resource, long value) {
       throw new RuntimeException(name + " cannot be modified!");
     }
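
For readers tracking the API impact: ResourceNotFoundException is a YarnRuntimeException, and after this patch it no longer appears in the signatures of the Resource accessors, so callers only deal with it when probing for optional resource types. The snippet below is an illustrative sketch, not part of the patch; the resource name "yarn.io/gpu" is an assumed example that only resolves on clusters where that type is configured.

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;

    public class ResourceLookupSketch {
      /** Returns the GPU count of an allocation, or 0 if the type is unknown. */
      public static long gpusOrZero(Resource allocation) {
        try {
          // No "throws" clause any more; the exception only surfaces when the
          // resource type is not registered with ResourceUtils.
          return allocation.getResourceValue("yarn.io/gpu");
        } catch (ResourceNotFoundException e) {
          // The resource type is not configured, so treat it as absent.
          return 0L;
        }
      }
    }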
 




[21/50] hadoop git commit: HDDS-257. Hook up VolumeSet#shutdown from HddsDispatcher#shutdown. Contributed by Hanisha Koneru

Posted by in...@apache.org.
HDDS-257. Hook up VolumeSet#shutdown from HddsDispatcher#shutdown. Contributed by Hanisha Koneru


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba25d27d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba25d27d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba25d27d

Branch: refs/heads/HADOOP-15461
Commit: ba25d27ddb8d32abf5e1314a51eec7cad789b316
Parents: de894d3
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Fri Jul 20 12:41:52 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Fri Jul 20 12:41:52 2018 -0700

----------------------------------------------------------------------
 .../container/common/impl/HddsDispatcher.java   |  2 ++
 .../container/common/volume/HddsVolume.java     |  2 --
 .../container/common/volume/VolumeInfo.java     |  8 ++++++
 .../container/common/volume/VolumeSet.java      | 18 +++++++++++++-
 .../container/common/volume/VolumeUsage.java    | 17 -------------
 .../container/common/volume/TestVolumeSet.java  | 26 +++++++++++++++++---
 6 files changed, 50 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba25d27d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index f0c2aa9..bee8417 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -81,6 +81,8 @@ public class HddsDispatcher implements ContainerDispatcher {
 
   @Override
   public void shutdown() {
+    // Shutdown the volumes
+    volumeSet.shutdown();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba25d27d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 6468720..0cbfd9f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -370,6 +370,4 @@ public final class HddsVolume {
   public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
     volumeInfo.setScmUsageForTesting(scmUsageForTest);
   }
-
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba25d27d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
index 4b13d45..62fca63 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
@@ -129,4 +129,12 @@ public class VolumeInfo {
   public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
     usage.setScmUsageForTesting(scmUsageForTest);
   }
+
+  /**
+   * Only for testing. Do not use otherwise.
+   */
+  @VisibleForTesting
+  public VolumeUsage getUsageForTesting() {
+    return usage;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba25d27d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
index 2dd4763..4dfde37 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
@@ -23,9 +23,11 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.apache.hadoop.util.RunJar.SHUTDOWN_HOOK_PRIORITY;
+
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos;
@@ -40,6 +42,7 @@ import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.util.InstrumentedLock;
+import org.apache.hadoop.util.ShutdownHookManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -89,6 +92,8 @@ public class VolumeSet {
   private final String datanodeUuid;
   private String clusterID;
 
+  private Runnable shutdownHook;
+
   public VolumeSet(String dnUuid, Configuration conf)
       throws DiskOutOfSpaceException {
     this(dnUuid, null, conf);
@@ -155,6 +160,13 @@ public class VolumeSet {
     if (volumeMap.size() == 0) {
       throw new DiskOutOfSpaceException("No storage location configured");
     }
+
+    // Ensure volume threads are stopped and scm df is saved during shutdown.
+    shutdownHook = () -> {
+      shutdown();
+    };
+    ShutdownHookManager.get().addShutdownHook(shutdownHook,
+        SHUTDOWN_HOOK_PRIORITY);
   }
 
   /**
@@ -296,6 +308,10 @@ public class VolumeSet {
             ex);
       }
     }
+
+    if (shutdownHook != null) {
+      ShutdownHookManager.get().removeShutdownHook(shutdownHook);
+    }
   }
 
   @VisibleForTesting
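
The add/remove pairing above follows the usual ShutdownHookManager idiom: register the hook when the object is created, and deregister it on explicit shutdown so the same cleanup does not run a second time at JVM exit. A minimal, self-contained sketch of that idiom (the class and its resources are invented for illustration; only the hook calls mirror the patch):

    import org.apache.hadoop.util.ShutdownHookManager;
    import static org.apache.hadoop.util.RunJar.SHUTDOWN_HOOK_PRIORITY;

    public class ManagedVolumes {
      private Runnable shutdownHook;

      public ManagedVolumes() {
        // Registered up front so resources are still released if the process
        // exits without anyone calling shutdown() explicitly.
        shutdownHook = this::shutdown;
        ShutdownHookManager.get().addShutdownHook(shutdownHook,
            SHUTDOWN_HOOK_PRIORITY);
      }

      public synchronized void shutdown() {
        // ... close volumes, persist usage counters, etc. ...

        // Deregister so an explicit shutdown is not followed by a second run
        // of the same cleanup when the JVM exits.
        if (shutdownHook != null) {
          ShutdownHookManager.get().removeShutdownHook(shutdownHook);
          shutdownHook = null;
        }
      }
    }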

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba25d27d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
index e10d1d4..2c7563e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.fs.CachingGetSpaceUsed;
 import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.GetSpaceUsed;
 import org.apache.hadoop.io.IOUtils;
-import static org.apache.hadoop.util.RunJar.SHUTDOWN_HOOK_PRIORITY;
-import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -49,7 +47,6 @@ public class VolumeUsage {
   private final DF df;
   private final File scmUsedFile;
   private GetSpaceUsed scmUsage;
-  private Runnable shutdownHook;
 
   private static final String DU_CACHE_FILE = "scmUsed";
   private volatile boolean scmUsedSaved = false;
@@ -72,15 +69,6 @@ public class VolumeUsage {
         .setConf(conf)
         .setInitialUsed(loadScmUsed())
         .build();
-
-    // Ensure scm df is saved during shutdown.
-    shutdownHook = () -> {
-      if (!scmUsedSaved) {
-        saveScmUsed();
-      }
-    };
-    ShutdownHookManager.get().addShutdownHook(shutdownHook,
-        SHUTDOWN_HOOK_PRIORITY);
   }
 
   long getCapacity() {
@@ -106,11 +94,6 @@ public class VolumeUsage {
 
   public void shutdown() {
     saveScmUsed();
-    scmUsedSaved = true;
-
-    if (shutdownHook != null) {
-      ShutdownHookManager.get().removeShutdownHook(shutdownHook);
-    }
 
     if (scmUsage instanceof CachingGetSpaceUsed) {
       IOUtils.cleanupWithLogger(null, ((CachingGetSpaceUsed) scmUsage));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba25d27d/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index 4f75b9a..3ee9343 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -34,8 +34,10 @@ import static org.apache.hadoop.ozone.container.common.volume.HddsVolume
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -87,6 +89,7 @@ public class TestVolumeSet {
     for (HddsVolume volume : volumes) {
       FileUtils.deleteDirectory(volume.getHddsRootDir());
     }
+    volumeSet.shutdown();
   }
 
   private boolean checkVolumeExistsInVolumeSet(String volume) {
@@ -120,9 +123,6 @@ public class TestVolumeSet {
 
     // Add a volume to VolumeSet
     String volume3 = baseDir + "disk3";
-//    File dir3 = new File(volume3, "hdds");
-//    File[] files = dir3.listFiles();
-//    System.out.println("------ " + files[0].getPath());
     boolean success = volumeSet.addVolume(volume3);
 
     assertTrue(success);
@@ -204,4 +204,24 @@ public class TestVolumeSet {
     File volume = new File(volume3);
     FileUtils.deleteDirectory(volume);
   }
+
+  @Test
+  public void testShutdown() throws Exception {
+    List<HddsVolume> volumesList = volumeSet.getVolumesList();
+
+    volumeSet.shutdown();
+
+    // Verify that the volumes are shutdown and the volumeUsage is set to null.
+    for (HddsVolume volume : volumesList) {
+      Assert.assertNull(volume.getVolumeInfo().getUsageForTesting());
+      try {
+        // getAvailable() should throw null pointer exception as usage is null.
+        volume.getAvailable();
+        fail("Volume shutdown failed.");
+      } catch (NullPointerException ex) {
+        // Do Nothing. Exception is expected.
+      }
+    }
+
+  }
 }




[10/50] hadoop git commit: MAPREDUCE-7118. Distributed cache conflicts break backwards compatibility. (Jason Lowe via wangda)

Posted by in...@apache.org.
MAPREDUCE-7118. Distributed cache conflicts break backwards compatibility. (Jason Lowe via wangda)

Change-Id: I89ab4852b4ad305fec19812e8931c59d96581376


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3b4d4cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3b4d4cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3b4d4cc

Branch: refs/heads/HADOOP-15461
Commit: b3b4d4ccb53fdf8dacc66e912822b34f8b3bf215
Parents: 2564884
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Jul 19 12:03:24 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Thu Jul 19 14:26:05 2018 -0700

----------------------------------------------------------------------
 .../mapreduce/v2/util/LocalResourceBuilder.java |  8 +++-----
 .../hadoop/mapreduce/v2/util/TestMRApps.java    | 20 ++++++++++++++++++--
 2 files changed, 21 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3b4d4cc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java
index 48b157e..48cc29e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.InvalidJobConfException;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.yarn.api.records.LocalResource;
@@ -144,10 +143,9 @@ class LocalResourceBuilder {
 
         LocalResource orig = localResources.get(linkName);
         if(orig != null && !orig.getResource().equals(URL.fromURI(p.toUri()))) {
-          throw new InvalidJobConfException(
-              getResourceDescription(orig.getType()) + orig.getResource()
-                  +
-              " conflicts with " + getResourceDescription(type) + u);
+          LOG.warn(getResourceDescription(orig.getType()) + orig.getResource()
+              + " conflicts with " + getResourceDescription(type) + u);
+          continue;
         }
         Boolean sharedCachePolicy = sharedCacheUploadPolicies.get(u.toString());
         sharedCachePolicy =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3b4d4cc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
index 3aadd63..c6a2874 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
@@ -360,7 +360,7 @@ public class TestMRApps {
   }
   
   @SuppressWarnings("deprecation")
-  @Test(timeout = 120000, expected = InvalidJobConfException.class)
+  @Test(timeout = 120000)
   public void testSetupDistributedCacheConflicts() throws Exception {
     Configuration conf = new Configuration();
     conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
@@ -388,10 +388,18 @@ public class TestMRApps {
     Map<String, LocalResource> localResources = 
       new HashMap<String, LocalResource>();
     MRApps.setupDistributedCache(conf, localResources);
+
+    assertEquals(1, localResources.size());
+    LocalResource lr = localResources.get("something");
+    //Archive wins
+    assertNotNull(lr);
+    assertEquals(10l, lr.getSize());
+    assertEquals(10l, lr.getTimestamp());
+    assertEquals(LocalResourceType.ARCHIVE, lr.getType());
   }
   
   @SuppressWarnings("deprecation")
-  @Test(timeout = 120000, expected = InvalidJobConfException.class)
+  @Test(timeout = 120000)
   public void testSetupDistributedCacheConflictsFiles() throws Exception {
     Configuration conf = new Configuration();
     conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
@@ -416,6 +424,14 @@ public class TestMRApps {
     Map<String, LocalResource> localResources = 
       new HashMap<String, LocalResource>();
     MRApps.setupDistributedCache(conf, localResources);
+
+    assertEquals(1, localResources.size());
+    LocalResource lr = localResources.get("something");
+    //First one wins
+    assertNotNull(lr);
+    assertEquals(10l, lr.getSize());
+    assertEquals(10l, lr.getTimestamp());
+    assertEquals(LocalResourceType.FILE, lr.getType());
   }
   
   @SuppressWarnings("deprecation")
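
To make the behavior change concrete: the two tests above register cache entries whose localized link names collide. A hedged sketch of a job that produces the same collision (the HDFS paths and the "something" link name are invented): before this patch submission failed with InvalidJobConfException, while with it the first resource registered for the link is kept and a warning is logged.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class ConflictingCacheEntries {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "cache-conflict-demo");
        // The "#something" fragment names the localized symlink; reusing it
        // for both entries is the conflict this change now tolerates.
        job.addCacheArchive(new URI("hdfs:///tmp/something.tgz#something"));
        job.addCacheFile(new URI("hdfs:///tmp/something.txt#something"));
        // ... set mapper/reducer and input/output paths, then job.submit() ...
      }
    }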




[34/50] hadoop git commit: HADOOP-15586. Fix wrong log statement in AbstractService. (Szilard Nemeth via Haibo Chen)

Posted by in...@apache.org.
HADOOP-15586. Fix wrong log statement in AbstractService. (Szilard Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17e26163
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17e26163
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17e26163

Branch: refs/heads/HADOOP-15461
Commit: 17e26163ec1b71cd13a6a82150aca94283f10ed1
Parents: 9d3c39e
Author: Haibo Chen <ha...@apache.org>
Authored: Mon Jul 23 11:18:25 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Mon Jul 23 11:18:25 2018 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/service/AbstractService.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17e26163/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
index 70de647..5b96fbf4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
@@ -254,7 +254,7 @@ public abstract class AbstractService implements Service {
    * @param exception the exception
    */
   protected final void noteFailure(Exception exception) {
-    LOG.debug("noteFailure {}" + exception);
+    LOG.debug("noteFailure", exception);
     if (exception == null) {
       //make sure failure logic doesn't itself cause problems
       return;
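
For anyone skimming past the one-line diff, the two forms differ as follows (shown in an illustrative class, not part of the patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class NoteFailureLogging {
      private static final Logger LOG =
          LoggerFactory.getLogger(NoteFailureLogging.class);

      static void noteFailure(Exception exception) {
        // Old form: the exception is concatenated into the message, so the
        // String is built even when DEBUG is disabled, the "{}" placeholder is
        // never filled in, and no stack trace is printed.
        LOG.debug("noteFailure {}" + exception);

        // New form: the (String, Throwable) overload logs the message together
        // with the exception's full stack trace at DEBUG level.
        LOG.debug("noteFailure", exception);
      }
    }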




[20/50] hadoop git commit: HDDS-250. Cleanup ContainerData.

Posted by in...@apache.org.
HDDS-250. Cleanup ContainerData.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de894d34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de894d34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de894d34

Branch: refs/heads/HADOOP-15461
Commit: de894d34f6739685f32cd63a0e26b0e45bcf5c8c
Parents: 89a0f80
Author: Hanisha Koneru <ha...@apache.org>
Authored: Fri Jul 20 11:36:42 2018 -0700
Committer: Hanisha Koneru <ha...@apache.org>
Committed: Fri Jul 20 11:36:42 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/ozone/OzoneConsts.java    |  5 +-
 .../org/apache/hadoop/ozone/common/Storage.java |  2 +-
 .../common/helpers/ContainerUtils.java          | 21 -------
 .../container/common/impl/ContainerData.java    | 64 ++------------------
 .../container/keyvalue/KeyValueContainer.java   |  6 +-
 .../keyvalue/KeyValueContainerData.java         | 56 +++++------------
 .../helpers/KeyValueContainerLocationUtil.java  | 18 +++---
 .../background/BlockDeletingService.java        |  8 +--
 .../container/ozoneimpl/ContainerReader.java    | 30 ++++-----
 .../common/impl/TestContainerDataYaml.java      |  4 +-
 .../test/resources/additionalfields.container   |  4 +-
 .../src/test/resources/incorrect.container      |  4 +-
 .../common/impl/TestContainerPersistence.java   | 15 +++--
 13 files changed, 71 insertions(+), 166 deletions(-)
----------------------------------------------------------------------
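
Before the per-file diffs, a usage-level sketch of the KeyValueContainerLocationUtil helpers whose parameters are renamed further down; the volume path, SCM id and container id are invented values, and the path comments only approximate the layout documented in ContainerReader below:

    import java.io.File;
    import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;

    public class ContainerPathSketch {
      public static void main(String[] args) {
        long containerId = 12L;
        String hddsVolumeDir = "/data/disk1/hdds";   // root of one HDDS volume
        String scmId = "scm-uuid-1234";

        // roughly .../<scmId>/current/<containerDir>/12/metadata
        File metaDir = KeyValueContainerLocationUtil.getContainerMetaDataPath(
            hddsVolumeDir, scmId, containerId);

        // roughly .../<scmId>/current/<containerDir>/12/chunks
        File chunksDir = KeyValueContainerLocationUtil.getChunksLocationPath(
            hddsVolumeDir, scmId, containerId);

        // roughly .../12/metadata/12.container
        File containerFile = KeyValueContainerLocationUtil.getContainerFile(
            metaDir, Long.toString(containerId));

        System.out.println(metaDir + "\n" + chunksDir + "\n" + containerFile);
      }
    }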


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 0db5993..25b68e0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -70,12 +70,9 @@ public final class OzoneConsts {
   public static final String CONTAINER_EXTENSION = ".container";
   public static final String CONTAINER_META = ".meta";
 
-  //  container storage is in the following format.
-  //  Data Volume basePath/containers/<containerName>/metadata and
-  //  Data Volume basePath/containers/<containerName>/data/...
+  // Refer to {@link ContainerReader} for container storage layout on disk.
   public static final String CONTAINER_PREFIX  = "containers";
   public static final String CONTAINER_META_PATH = "metadata";
-  public static final String CONTAINER_DATA_PATH = "data";
   public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp";
   public static final String CONTAINER_CHUNK_NAME_DELIMITER = ".";
   public static final String CONTAINER_ROOT_PREFIX = "repository";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
index e8f41a6..1826a58 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
@@ -47,7 +47,7 @@ public abstract class Storage {
 
   public static final String STORAGE_DIR_CURRENT = "current";
   protected static final String STORAGE_FILE_VERSION = "VERSION";
-  public static final String CONTAINER_DIR = "containerdir";
+  public static final String CONTAINER_DIR = "containerDir";
 
   private final NodeType nodeType;
   private final File root;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index 18a5231..1d5dfc5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -104,27 +104,6 @@ public final class ContainerUtils {
   }
 
   /**
-   * Returns a ReadContainer Response.
-   * @param msg requestProto message.
-   * @param containerData container data to be returned.
-   * @return ReadContainer Response
-   */
-  public static ContainerProtos.ContainerCommandResponseProto
-    getReadContainerResponse(ContainerProtos.ContainerCommandRequestProto msg,
-      ContainerData containerData) {
-    Preconditions.checkNotNull(containerData);
-
-    ContainerProtos.ReadContainerResponseProto.Builder response =
-        ContainerProtos.ReadContainerResponseProto.newBuilder();
-    response.setContainerData(containerData.getProtoBufMessage());
-
-    ContainerProtos.ContainerCommandResponseProto.Builder builder =
-        getSuccessResponseBuilder(msg);
-    builder.setReadContainer(response);
-    return builder.build();
-  }
-
-  /**
    * We found a command type but no associated payload for the command. Hence
    * return malformed Command as response.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index 54b186b..a7e2b55 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.common.impl;
 
 import com.google.common.base.Preconditions;
+import java.util.List;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
     ContainerType;
@@ -38,7 +39,7 @@ import static java.lang.Math.max;
  * ContainerData is the in-memory representation of container metadata and is
  * represented on disk by the .container file.
  */
-public class ContainerData {
+public abstract class ContainerData {
 
   //Type of the container.
   // For now, we support only KeyValueContainer.
@@ -47,9 +48,6 @@ public class ContainerData {
   // Unique identifier for the container
   private final long containerID;
 
-  // Path to container root dir.
-  private String containerPath;
-
   // Layout version of the container data
   private final int layOutVersion;
 
@@ -85,7 +83,7 @@ public class ContainerData {
    * @param containerId - ContainerId
    * @param size - container maximum size
    */
-  public ContainerData(ContainerType type, long containerId, int size) {
+  protected ContainerData(ContainerType type, long containerId, int size) {
     this(type, containerId,
         ChunkLayOutVersion.getLatestVersion().getVersion(), size);
   }
@@ -97,7 +95,7 @@ public class ContainerData {
    * @param layOutVersion - Container layOutVersion
    * @param size - Container maximum size
    */
-  public ContainerData(ContainerType type, long containerId,
+  protected ContainerData(ContainerType type, long containerId,
     int layOutVersion, int size) {
     Preconditions.checkNotNull(type);
 
@@ -128,17 +126,7 @@ public class ContainerData {
    * Returns the path to base dir of the container.
    * @return Path to base dir.
    */
-  public String getContainerPath() {
-    return containerPath;
-  }
-
-  /**
-   * Set the base dir path of the container.
-   * @param baseDir path to base dir
-   */
-  public void setContainerPath(String baseDir) {
-    this.containerPath = baseDir;
-  }
+  public abstract String getContainerPath();
 
   /**
    * Returns the type of the container.
@@ -388,20 +376,6 @@ public class ContainerData {
   }
 
   /**
-   * Returns container metadata path.
-   */
-  public String getMetadataPath() {
-    return null;
-  }
-
-  /**
-   * Returns container data path.
-   */
-  public String getDataPath() {
-    return null;
-  }
-
-  /**
    * Increase the count of pending deletion blocks.
    *
    * @param numBlocks increment number
@@ -431,33 +405,7 @@ public class ContainerData {
    *
    * @return Protocol Buffer Message
    */
-  public ContainerProtos.ContainerData getProtoBufMessage() {
-    ContainerProtos.ContainerData.Builder builder =
-        ContainerProtos.ContainerData.newBuilder();
-
-    builder.setContainerID(this.getContainerID());
-
-    if (this.containerPath != null) {
-      builder.setContainerPath(this.containerPath);
-    }
-
-    builder.setState(this.getState());
-
-    for (Map.Entry<String, String> entry : metadata.entrySet()) {
-      ContainerProtos.KeyValue.Builder keyValBuilder =
-          ContainerProtos.KeyValue.newBuilder();
-      builder.addMetadata(keyValBuilder.setKey(entry.getKey())
-          .setValue(entry.getValue()).build());
-    }
-
-    if (this.getBytesUsed() >= 0) {
-      builder.setBytesUsed(this.getBytesUsed());
-    }
-
-    builder.setContainerType(containerType);
-
-    return builder.build();
-  }
+  public abstract ContainerProtos.ContainerData getProtoBufMessage();
 
   /**
    * Sets deleteTransactionId to latest delete transactionId for the container.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 155a988..f381e24 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -111,15 +111,15 @@ public class KeyValueContainer implements Container {
     try {
       HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
           .getVolumesList(), maxSize);
-      String containerBasePath = containerVolume.getHddsRootDir().toString();
+      String hddsVolumeDir = containerVolume.getHddsRootDir().toString();
 
       long containerId = containerData.getContainerID();
       String containerName = Long.toString(containerId);
 
       containerMetaDataPath = KeyValueContainerLocationUtil
-          .getContainerMetaDataPath(containerBasePath, scmId, containerId);
+          .getContainerMetaDataPath(hddsVolumeDir, scmId, containerId);
       File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath(
-          containerBasePath, scmId, containerId);
+          hddsVolumeDir, scmId, containerId);
       File containerFile = KeyValueContainerLocationUtil.getContainerFile(
           containerMetaDataPath, containerName);
       File containerCheckSumFile = KeyValueContainerLocationUtil

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index d9ae38a..3e3cc77 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -22,6 +22,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.yaml.snakeyaml.nodes.Tag;
 
@@ -73,9 +74,6 @@ public class KeyValueContainerData extends ContainerData {
   //Type of DB used to store key to chunks mapping
   private String containerDBType;
 
-  //Number of pending deletion blocks in container.
-  private int numPendingDeletionBlocks;
-
   private File dbFile = null;
 
   /**
@@ -85,7 +83,6 @@ public class KeyValueContainerData extends ContainerData {
    */
   public KeyValueContainerData(long id, int size) {
     super(ContainerProtos.ContainerType.KeyValueContainer, id, size);
-    this.numPendingDeletionBlocks = 0;
   }
 
   /**
@@ -97,7 +94,6 @@ public class KeyValueContainerData extends ContainerData {
   public KeyValueContainerData(long id, int layOutVersion, int size) {
     super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion,
         size);
-    this.numPendingDeletionBlocks = 0;
   }
 
 
@@ -120,8 +116,8 @@ public class KeyValueContainerData extends ContainerData {
 
   /**
    * Returns container metadata path.
+   * @return - Physical path where container file and checksum is stored.
    */
-  @Override
   public String getMetadataPath() {
     return metadataPath;
   }
@@ -136,18 +132,21 @@ public class KeyValueContainerData extends ContainerData {
   }
 
   /**
-   * Get chunks path.
-   * @return - Physical path where container file and checksum is stored.
+   * Returns the path to base dir of the container.
+   * @return Path to base dir
    */
-  public String getChunksPath() {
-    return chunksPath;
+  public String getContainerPath() {
+    if (metadataPath == null) {
+      return null;
+    }
+    return new File(metadataPath).getParent();
   }
 
   /**
-   * Returns container chunks path.
+   * Get chunks path.
+   * @return - Path where chunks are stored
    */
-  @Override
-  public String getDataPath() {
+  public String getChunksPath() {
     return chunksPath;
   }
 
@@ -176,33 +175,6 @@ public class KeyValueContainerData extends ContainerData {
   }
 
   /**
-   * Returns the number of pending deletion blocks in container.
-   * @return numPendingDeletionBlocks
-   */
-  public int getNumPendingDeletionBlocks() {
-    return numPendingDeletionBlocks;
-  }
-
-
-  /**
-   * Increase the count of pending deletion blocks.
-   *
-   * @param numBlocks increment number
-   */
-  public void incrPendingDeletionBlocks(int numBlocks) {
-    this.numPendingDeletionBlocks += numBlocks;
-  }
-
-  /**
-   * Decrease the count of pending deletion blocks.
-   *
-   * @param numBlocks decrement number
-   */
-  public void decrPendingDeletionBlocks(int numBlocks) {
-    this.numPendingDeletionBlocks -= numBlocks;
-  }
-
-  /**
    * Returns a ProtoBuf Message from ContainerData.
    *
    * @return Protocol Buffer Message
@@ -260,7 +232,9 @@ public class KeyValueContainerData extends ContainerData {
     }
 
     if (protoData.hasContainerPath()) {
-      data.setContainerPath(protoData.getContainerPath());
+      String metadataPath = protoData.getContainerPath()+ File.separator +
+          OzoneConsts.CONTAINER_META_PATH;
+      data.setMetadataPath(metadataPath);
     }
 
     if (protoData.hasState()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
index 4710c51..868b9f4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
@@ -34,14 +34,16 @@ public final class KeyValueContainerLocationUtil {
   }
   /**
    * Returns Container Metadata Location.
-   * @param baseDir
+   * @param hddsVolumeDir base dir of the hdds volume where scm directories
+   *                      are stored
    * @param scmId
    * @param containerId
-   * @return containerMetadata Path
+   * @return containerMetadata Path to container metadata location where
+   * .container file will be stored.
    */
-  public static File getContainerMetaDataPath(String baseDir, String scmId,
+  public static File getContainerMetaDataPath(String hddsVolumeDir, String scmId,
                                               long containerId) {
-    String containerMetaDataPath = getBaseContainerLocation(baseDir, scmId,
+    String containerMetaDataPath = getBaseContainerLocation(hddsVolumeDir, scmId,
         containerId);
     containerMetaDataPath = containerMetaDataPath + File.separator +
         OzoneConsts.CONTAINER_META_PATH;
@@ -65,21 +67,21 @@ public final class KeyValueContainerLocationUtil {
 
   /**
    * Returns base directory for specified container.
-   * @param baseDir
+   * @param hddsVolumeDir
    * @param scmId
    * @param containerId
    * @return base directory for container.
    */
-  private static String getBaseContainerLocation(String baseDir, String scmId,
+  private static String getBaseContainerLocation(String hddsVolumeDir, String scmId,
                                         long containerId) {
-    Preconditions.checkNotNull(baseDir, "Base Directory cannot be null");
+    Preconditions.checkNotNull(hddsVolumeDir, "Base Directory cannot be null");
     Preconditions.checkNotNull(scmId, "scmUuid cannot be null");
     Preconditions.checkState(containerId >= 0,
         "Container Id cannot be negative.");
 
     String containerSubDirectory = getContainerSubDirectory(containerId);
 
-    String containerMetaDataPath = baseDir  + File.separator + scmId +
+    String containerMetaDataPath = hddsVolumeDir  + File.separator + scmId +
         File.separator + Storage.STORAGE_DIR_CURRENT + File.separator +
         containerSubDirectory + File.separator + containerId;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index 151ef94..a3e36f4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -172,11 +172,11 @@ public class BlockDeletingService extends BackgroundService{
       implements BackgroundTask<BackgroundTaskResult> {
 
     private final int priority;
-    private final ContainerData containerData;
+    private final KeyValueContainerData containerData;
 
     BlockDeletingTask(ContainerData containerName, int priority) {
       this.priority = priority;
-      this.containerData = containerName;
+      this.containerData = (KeyValueContainerData) containerName;
     }
 
     @Override
@@ -199,10 +199,10 @@ public class BlockDeletingService extends BackgroundService{
       List<String> succeedBlocks = new LinkedList<>();
       LOG.debug("Container : {}, To-Delete blocks : {}",
           containerData.getContainerID(), toDeleteBlocks.size());
-      File dataDir = new File(containerData.getDataPath());
+      File dataDir = new File(containerData.getChunksPath());
       if (!dataDir.exists() || !dataDir.isDirectory()) {
         LOG.error("Invalid container data dir {} : "
-            + "not exist or not a directory", dataDir.getAbsolutePath());
+            + "does not exist or not a directory", dataDir.getAbsolutePath());
         return crr;
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
index 06e49f0..986aa16 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
@@ -40,6 +40,21 @@ import java.io.IOException;
 
 /**
  * Class used to read .container files from Volume and build container map.
+ *
+ * Layout of the container directory on disk is as follows:
+ *
+ * ../hdds/VERSION
+ * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID>/metadata/<<containerID>>.container
+ * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID>/metadata/<<containerID>>.checksum
+ * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID>/metadata/<<containerID>>.db
+ * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID>/<<dataPath>>
+ *
+ * Note that the <<dataPath>> is dependent on the ContainerType.
+ * For KeyValueContainers, the data is stored in a "chunks" folder. As such,
+ * the <<dataPath>> layout for KeyValueContainers is
+ *
+ * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID>/chunks/<<chunksFile>>
+ *
  */
 public class ContainerReader implements Runnable {
 
@@ -73,21 +88,6 @@ public class ContainerReader implements Runnable {
     Preconditions.checkNotNull(hddsVolumeRootDir, "hddsVolumeRootDir" +
         "cannot be null");
 
-
-    /**
-     *
-     * layout of the container directory on the disk.
-     * /hdds/<<scmUuid>>/current/<<containerdir>>/</containerID>/metadata
-     * /<<containerID>>.container
-     * /hdds/<<scmUuid>>/current/<<containerdir>>/<<containerID>>/metadata
-     * /<<containerID>>.checksum
-     * /hdds/<<scmUuid>>/current/<<containerdir>>/<<containerID>>/metadata
-     * /<<containerID>>.db
-     * /hdds/<<scmUuid>>/current/<<containerdir>>/<<containerID>>/chunks
-     * /<<chunkFile>>
-     *
-     **/
-
     //filtering scm directory
     File[] scmDir = hddsVolumeRootDir.listFiles(new FileFilter() {
       @Override
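
Put concretely, the layout documented in the javadoc above composes paths of the
following shape. This is an illustrative sketch only: the values are made up, and
it simply mirrors the string concatenation shown in the KeyValueContainerLocationUtil
hunk at the top of this message.

    // Illustrative values, not taken from the patch.
    String hddsVolumeDir = "/data/hdds";             // volume root (hypothetical)
    String scmId = "aed-fg4-hji-jkl";                // SCM UUID (hypothetical)
    String containerSubDirectory = "containerDir0";  // derived from the container id
    long containerId = 1;
    String containerMetaDataPath = hddsVolumeDir + "/" + scmId + "/current/"
        + containerSubDirectory + "/" + containerId;
    // -> /data/hdds/aed-fg4-hji-jkl/current/containerDir0/1
    // Per the javadoc above, .../1/metadata holds the .container, .checksum and
    // .db files, and .../1/chunks holds the chunk files of a KeyValueContainer.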

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
index eed5606..d734271 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
@@ -151,9 +151,9 @@ public class TestContainerDataYaml {
       assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
           .getContainerType());
       assertEquals(9223372036854775807L, kvData.getContainerID());
-      assertEquals("/hdds/current/aed-fg4-hji-jkl/containerdir0/1", kvData
+      assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
           .getChunksPath());
-      assertEquals("/hdds/current/aed-fg4-hji-jkl/containerdir0/1", kvData
+      assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
           .getMetadataPath());
       assertEquals(1, kvData.getLayOutVersion());
       assertEquals(2, kvData.getMetadata().size());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/container-service/src/test/resources/additionalfields.container
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/resources/additionalfields.container b/hadoop-hdds/container-service/src/test/resources/additionalfields.container
index 38c104a..f437a95 100644
--- a/hadoop-hdds/container-service/src/test/resources/additionalfields.container
+++ b/hadoop-hdds/container-service/src/test/resources/additionalfields.container
@@ -1,9 +1,9 @@
 !<KeyValueContainerData>
 containerDBType: RocksDB
-chunksPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
+chunksPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1
 containerID: 9223372036854775807
 containerType: KeyValueContainer
-metadataPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
+metadataPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1
 layOutVersion: 1
 maxSizeGB: 5
 metadata: {OWNER: ozone, VOLUME: hdfs}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/container-service/src/test/resources/incorrect.container
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/resources/incorrect.container b/hadoop-hdds/container-service/src/test/resources/incorrect.container
index abbb6aa..38a8857 100644
--- a/hadoop-hdds/container-service/src/test/resources/incorrect.container
+++ b/hadoop-hdds/container-service/src/test/resources/incorrect.container
@@ -1,9 +1,9 @@
 !<KeyValueContainerData>
 containerDBType: RocksDB
-chunksPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
+chunksPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1
 containerID: 9223372036854775807
 containerType: KeyValueContainer
-metadataPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
+metadataPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1
 layOutVersion: 1
 maxSizeGB: 5
 metadata: {OWNER: ozone, VOLUME: hdfs}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index e634dd8..d29937e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -92,6 +92,9 @@ import static org.junit.Assert.fail;
 
 /**
  * Simple tests to verify that container persistence works as expected.
+ * Some of these tests are specific to {@link KeyValueContainer}. If a new
+ * {@link ContainerProtos.ContainerType} is added, the tests need to be
+ * modified.
  */
 public class TestContainerPersistence {
   @Rule
@@ -409,9 +412,10 @@ public class TestContainerPersistence {
       fileHashMap.put(fileName, info);
     }
 
-    ContainerData cNewData = container.getContainerData();
+    KeyValueContainerData cNewData =
+        (KeyValueContainerData) container.getContainerData();
     Assert.assertNotNull(cNewData);
-    Path dataDir = Paths.get(cNewData.getDataPath());
+    Path dataDir = Paths.get(cNewData.getChunksPath());
 
     String globFormat = String.format("%s.data.*", blockID.getLocalID());
     MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
@@ -707,7 +711,8 @@ public class TestContainerPersistence {
   @Test
   public void testUpdateContainer() throws IOException {
     long testContainerID = ContainerTestHelper.getTestContainerID();
-    Container container = addContainer(containerSet, testContainerID);
+    KeyValueContainer container =
+        (KeyValueContainer) addContainer(containerSet, testContainerID);
 
     File orgContainerFile = KeyValueContainerLocationUtil.getContainerFile(
         new File(container.getContainerData().getMetadataPath()),
@@ -725,7 +730,7 @@ public class TestContainerPersistence {
         .containsKey(testContainerID));
 
     // Verify in-memory map
-    ContainerData actualNewData =
+    KeyValueContainerData actualNewData = (KeyValueContainerData)
         containerSet.getContainer(testContainerID).getContainerData();
     Assert.assertEquals("shire_new",
         actualNewData.getMetadata().get("VOLUME"));
@@ -766,7 +771,7 @@ public class TestContainerPersistence {
     container.update(newMetadata, true);
 
     // Verify in-memory map
-    actualNewData =
+    actualNewData = (KeyValueContainerData)
         containerSet.getContainer(testContainerID).getContainerData();
     Assert.assertEquals("shire_new_1",
         actualNewData.getMetadata().get("VOLUME"));




[46/50] hadoop git commit: HADOOP-15612. Improve exception when tfile fails to load LzoCodec. (gera)

Posted by in...@apache.org.
HADOOP-15612. Improve exception when tfile fails to load LzoCodec. (gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bec03cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bec03cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bec03cf

Branch: refs/heads/HADOOP-15461
Commit: 6bec03cfc8bdcf6aa3df9c22231ab959ba31f2f5
Parents: ea2c6c8
Author: Gera Shegalov <ge...@apache.org>
Authored: Tue Jul 17 00:05:39 2018 -0700
Committer: Gera Shegalov <ge...@apache.org>
Committed: Tue Jul 24 14:32:30 2018 -0700

----------------------------------------------------------------------
 .../hadoop/io/file/tfile/Compression.java       | 31 +++++++++++-------
 .../hadoop/io/file/tfile/TestCompression.java   | 34 +++++++++++++++++++-
 2 files changed, 53 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bec03cf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
index fa85ed7..c4347e0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
@@ -5,9 +5,9 @@
  * licenses this file to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
- * 
+ *
  * http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
@@ -24,6 +24,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.CodecPool;
 import org.apache.hadoop.io.compress.CompressionCodec;
@@ -78,25 +79,33 @@ public final class Compression {
   public enum Algorithm {
     LZO(TFile.COMPRESSION_LZO) {
       private transient boolean checked = false;
+      private transient ClassNotFoundException cnf;
+      private transient boolean reinitCodecInTests;
       private static final String defaultClazz =
           "org.apache.hadoop.io.compress.LzoCodec";
+      private transient String clazz;
       private transient CompressionCodec codec = null;
 
+      private String getLzoCodecClass() {
+        String extClazzConf = conf.get(CONF_LZO_CLASS);
+        String extClazz = (extClazzConf != null) ?
+            extClazzConf : System.getProperty(CONF_LZO_CLASS);
+        return (extClazz != null) ? extClazz : defaultClazz;
+      }
+
       @Override
       public synchronized boolean isSupported() {
-        if (!checked) {
+        if (!checked || reinitCodecInTests) {
           checked = true;
-          String extClazzConf = conf.get(CONF_LZO_CLASS);
-          String extClazz = (extClazzConf != null) ?
-              extClazzConf : System.getProperty(CONF_LZO_CLASS);
-          String clazz = (extClazz != null) ? extClazz : defaultClazz;
+          reinitCodecInTests = conf.getBoolean("test.reload.lzo.codec", false);
+          clazz = getLzoCodecClass();
           try {
             LOG.info("Trying to load Lzo codec class: " + clazz);
             codec =
                 (CompressionCodec) ReflectionUtils.newInstance(Class
                     .forName(clazz), conf);
           } catch (ClassNotFoundException e) {
-            // that is okay
+            cnf = e;
           }
         }
         return codec != null;
@@ -105,9 +114,9 @@ public final class Compression {
       @Override
       CompressionCodec getCodec() throws IOException {
         if (!isSupported()) {
-          throw new IOException(
-              "LZO codec class not specified. Did you forget to set property "
-                  + CONF_LZO_CLASS + "?");
+          throw new IOException(String.format(
+              "LZO codec %s=%s could not be loaded", CONF_LZO_CLASS, clazz),
+                  cnf);
         }
 
         return codec;
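
In practice the failure now reads like this. A rough sketch with a made-up codec
class name, assuming it runs inside the org.apache.hadoop.io.file.tfile package
(getCodec() and conf are package-private) and, as in the regression test below,
that the codec is re-resolved rather than served from the cached check:

    // Sketch only. Point the LZO class property at a class that does not exist,
    // then ask for the codec: the IOException message names CONF_LZO_CLASS and
    // the class, and the ClassNotFoundException is preserved as the cause.
    Compression.Algorithm.conf.set(
        Compression.Algorithm.CONF_LZO_CLASS, "com.example.MissingLzoCodec");
    try {
      Compression.Algorithm.LZO.getCodec();
    } catch (IOException e) {
      // message: "LZO codec <CONF_LZO_CLASS>=com.example.MissingLzoCodec could
      // not be loaded"; e.getCause() is the underlying ClassNotFoundException.
    }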

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bec03cf/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestCompression.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestCompression.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestCompression.java
index ff6c72a..b1bf077 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestCompression.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestCompression.java
@@ -17,14 +17,28 @@
  */
 package org.apache.hadoop.io.file.tfile;
 
-import org.junit.Test;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.*;
 
 import java.io.IOException;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 public class TestCompression {
 
+  @BeforeClass
+  public static void resetConfigBeforeAll() {
+    Compression.Algorithm.LZO.conf.setBoolean("test.reload.lzo.codec", true);
+  }
+
+  @AfterClass
+  public static void resetConfigAfterAll() {
+    Compression.Algorithm.LZO.conf.setBoolean("test.reload.lzo.codec", false);
+  }
+
   /**
    * Regression test for HADOOP-11418.
    * Verify we can set a LZO codec different from default LZO codec.
@@ -38,4 +52,22 @@ public class TestCompression {
     assertEquals(defaultCodec,
         Compression.Algorithm.LZO.getCodec().getClass().getName());
   }
+
+
+  @Test
+  public void testMisconfiguredLZOCodec() throws Exception {
+    // Dummy codec
+    String defaultCodec = "org.apache.hadoop.io.compress.InvalidLzoCodec";
+    Compression.Algorithm.conf.set(
+        Compression.Algorithm.CONF_LZO_CLASS, defaultCodec);
+    IOException ioEx = LambdaTestUtils.intercept(
+        IOException.class,
+        defaultCodec,
+        () -> Compression.Algorithm.LZO.getCodec());
+
+    if (!(ioEx.getCause() instanceof ClassNotFoundException)) {
+      throw ioEx;
+    }
+  }
+
 }




[41/50] hadoop git commit: HDDS-272. TestBlockDeletingService is failing with DiskOutOfSpaceException. Contributed by Lokesh Jain.

Posted by in...@apache.org.
HDDS-272. TestBlockDeletingService is failing with DiskOutOfSpaceException. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/773d312f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/773d312f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/773d312f

Branch: refs/heads/HADOOP-15461
Commit: 773d312f7412d5050c106ed3a1cd0d1934bfa2e0
Parents: ff7c2ed
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Tue Jul 24 21:23:20 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Tue Jul 24 21:23:20 2018 +0530

----------------------------------------------------------------------
 .../container/keyvalue/KeyValueHandler.java     |  2 +-
 .../background/BlockDeletingService.java        |  9 +-
 .../testutils/BlockDeletingServiceTestImpl.java |  3 +-
 .../common/TestBlockDeletingService.java        | 90 ++++++++++++--------
 4 files changed, 60 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/773d312f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 9aa3df7..d3a1ca4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -136,7 +136,7 @@ public class KeyValueHandler extends Handler {
             TimeUnit.MILLISECONDS);
     this.blockDeletingService =
         new BlockDeletingService(containerSet, svcInterval, serviceTimeout,
-            config);
+            TimeUnit.MILLISECONDS, config);
     blockDeletingService.start();
     // TODO: Add supoort for different volumeChoosingPolicies.
     volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/773d312f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index a3e36f4..4a572ca 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -88,11 +88,10 @@ public class BlockDeletingService extends BackgroundService{
   // Core pool size for container tasks
   private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10;
 
-  public BlockDeletingService(ContainerSet containerSet,
-      long serviceInterval, long serviceTimeout, Configuration conf) {
-    super("BlockDeletingService", serviceInterval,
-        TimeUnit.MILLISECONDS, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE,
-        serviceTimeout);
+  public BlockDeletingService(ContainerSet containerSet, long serviceInterval,
+      long serviceTimeout, TimeUnit timeUnit, Configuration conf) {
+    super("BlockDeletingService", serviceInterval, timeUnit,
+        BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
     this.containerSet = containerSet;
     containerDeletionPolicy = ReflectionUtils.newInstance(conf.getClass(
         ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
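
With the extra parameter, constructing the service now spells out the unit of the
interval explicitly. A minimal sketch (the interval and timeout values are
illustrative, and containerSet/conf are assumed to be in scope), matching the
updated call sites in KeyValueHandler and the tests:

    // Sketch only: 1000 ms interval, no timeout, unit passed explicitly.
    BlockDeletingService svc = new BlockDeletingService(
        containerSet, 1000, 0, TimeUnit.MILLISECONDS, conf);
    svc.start();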

http://git-wip-us.apache.org/repos/asf/hadoop/blob/773d312f/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
index a87f655..115b5e2 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
@@ -44,7 +44,8 @@ public class BlockDeletingServiceTestImpl
 
   public BlockDeletingServiceTestImpl(ContainerSet containerSet,
       int serviceInterval, Configuration conf) {
-    super(containerSet, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS, conf);
+    super(containerSet, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS,
+        TimeUnit.MILLISECONDS, conf);
   }
 
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hadoop/blob/773d312f/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 1ddd39a..a6e53c2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -45,17 +45,17 @@ import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.utils.BackgroundService;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataStore;
+import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.BeforeClass;
-import org.junit.Before;
-import org.junit.After;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
 import java.nio.charset.Charset;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
@@ -72,35 +72,28 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys
 /**
  * Tests to test block deleting service.
  */
-// TODO: Fix BlockDeletingService to work with new StorageLayer
 public class TestBlockDeletingService {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(TestBlockDeletingService.class);
 
   private static File testRoot;
-  private static File containersDir;
-  private static File chunksDir;
+  private static String scmId;
+  private static String clusterID;
 
   @BeforeClass
-  public static void init() {
+  public static void init() throws IOException {
     testRoot = GenericTestUtils
         .getTestDir(TestBlockDeletingService.class.getSimpleName());
-    chunksDir = new File(testRoot, "chunks");
-    containersDir = new File(testRoot, "containers");
-  }
-
-  @Before
-  public void setup() throws IOException {
-    if (chunksDir.exists()) {
-      FileUtils.deleteDirectory(chunksDir);
+    if (testRoot.exists()) {
+      FileUtils.cleanDirectory(testRoot);
     }
+    scmId = UUID.randomUUID().toString();
+    clusterID = UUID.randomUUID().toString();
   }
 
-  @After
-  public void cleanup() throws IOException {
-    FileUtils.deleteDirectory(chunksDir);
-    FileUtils.deleteDirectory(containersDir);
+  @AfterClass
+  public static void cleanup() throws IOException {
     FileUtils.deleteDirectory(testRoot);
   }
 
@@ -111,14 +104,15 @@ public class TestBlockDeletingService {
    */
   private void createToDeleteBlocks(ContainerSet containerSet,
       Configuration conf, int numOfContainers, int numOfBlocksPerContainer,
-      int numOfChunksPerBlock, File chunkDir) throws IOException {
+      int numOfChunksPerBlock) throws IOException {
     for (int x = 0; x < numOfContainers; x++) {
+      conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
       long containerID = ContainerTestHelper.getTestContainerID();
       KeyValueContainerData data = new KeyValueContainerData(containerID,
           ContainerTestHelper.CONTAINER_MAX_SIZE_GB);
       Container container = new KeyValueContainer(data, conf);
-      container.create(new VolumeSet(UUID.randomUUID().toString(), conf),
-          new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString());
+      container.create(new VolumeSet(scmId, clusterID, conf),
+          new RoundRobinVolumeChoosingPolicy(), scmId);
       containerSet.addContainer(container);
       data = (KeyValueContainerData) containerSet.getContainer(
           containerID).getContainerData();
@@ -133,7 +127,7 @@ public class TestBlockDeletingService {
         for (int k = 0; k<numOfChunksPerBlock; k++) {
           // offset doesn't matter here
           String chunkName = blockID.getLocalID() + "_chunk_" + k;
-          File chunk = new File(chunkDir, chunkName);
+          File chunk = new File(data.getChunksPath(), chunkName);
           FileUtils.writeStringToFile(chunk, "a chunk",
               Charset.defaultCharset());
           LOG.info("Creating file {}", chunk.getAbsolutePath());
@@ -193,7 +187,7 @@ public class TestBlockDeletingService {
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
     conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
     ContainerSet containerSet = new ContainerSet();
-    createToDeleteBlocks(containerSet, conf, 1, 3, 1, chunksDir);
+    createToDeleteBlocks(containerSet, conf, 1, 3, 1);
 
     BlockDeletingServiceTestImpl svc =
         new BlockDeletingServiceTestImpl(containerSet, 1000, conf);
@@ -208,12 +202,13 @@ public class TestBlockDeletingService {
     MetadataStore meta = KeyUtils.getDB(
         (KeyValueContainerData) containerData.get(0), conf);
     Map<Long, Container> containerMap = containerSet.getContainerMap();
+    long transactionId = containerMap.get(containerData.get(0).getContainerID())
+        .getContainerData().getDeleteTransactionId();
 
 
     // Number of deleted blocks in container should be equal to 0 before
     // block delete
-    // TODO : Implement deleteTransactionID in ContainerData.
-//    Assert.assertEquals(0, transactionId);
+    Assert.assertEquals(0, transactionId);
 
     // Ensure there are 3 blocks under deletion and 0 deleted blocks
     Assert.assertEquals(3, getUnderDeletionBlocksCount(meta));
@@ -247,7 +242,7 @@ public class TestBlockDeletingService {
     conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 10);
     ContainerSet containerSet = new ContainerSet();
     // Create 1 container with 100 blocks
-    createToDeleteBlocks(containerSet, conf, 1, 100, 1, chunksDir);
+    createToDeleteBlocks(containerSet, conf, 1, 100, 1);
 
     BlockDeletingServiceTestImpl service =
         new BlockDeletingServiceTestImpl(containerSet, 1000, conf);
@@ -275,12 +270,13 @@ public class TestBlockDeletingService {
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
     conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
     ContainerSet containerSet = new ContainerSet();
-    createToDeleteBlocks(containerSet, conf, 1, 3, 1, chunksDir);
+    createToDeleteBlocks(containerSet, conf, 1, 3, 1);
 
     // set timeout value as 1ns to trigger timeout behavior
     long timeout  = 1;
-    BlockDeletingService svc =
-        new BlockDeletingService(containerSet, 1000, timeout, conf);
+    BlockDeletingService svc = new BlockDeletingService(containerSet,
+        TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.NANOSECONDS,
+        conf);
     svc.start();
 
     LogCapturer log = LogCapturer.captureLogs(BackgroundService.LOG);
@@ -299,8 +295,10 @@ public class TestBlockDeletingService {
 
     // test for normal case that doesn't have timeout limitation
     timeout  = 0;
-    createToDeleteBlocks(containerSet, conf, 1, 3, 1, chunksDir);
-    svc =  new BlockDeletingService(containerSet, 1000, timeout, conf);
+    createToDeleteBlocks(containerSet, conf, 1, 3, 1);
+    svc = new BlockDeletingService(containerSet,
+        TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.MILLISECONDS,
+        conf);
     svc.start();
 
     // get container meta data
@@ -347,7 +345,7 @@ public class TestBlockDeletingService {
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 1);
     conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 1);
     ContainerSet containerSet = new ContainerSet();
-    createToDeleteBlocks(containerSet, conf, 2, 1, 10, chunksDir);
+    createToDeleteBlocks(containerSet, conf, 2, 1, 10);
 
     BlockDeletingServiceTestImpl service =
         new BlockDeletingServiceTestImpl(containerSet, 1000, conf);
@@ -357,7 +355,12 @@ public class TestBlockDeletingService {
       GenericTestUtils.waitFor(() -> service.isStarted(), 100, 3000);
       // 1st interval processes 1 container 1 block and 10 chunks
       deleteAndWait(service, 1);
-      Assert.assertEquals(10, chunksDir.listFiles().length);
+      Assert.assertEquals(10, getNumberOfChunksInContainers(containerSet));
+      deleteAndWait(service, 2);
+      deleteAndWait(service, 3);
+      deleteAndWait(service, 4);
+      deleteAndWait(service, 5);
+      Assert.assertEquals(0, getNumberOfChunksInContainers(containerSet));
     } finally {
       service.shutdown();
     }
@@ -383,10 +386,10 @@ public class TestBlockDeletingService {
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
     conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
     ContainerSet containerSet = new ContainerSet();
-    createToDeleteBlocks(containerSet, conf, 5, 3, 1, chunksDir);
+    createToDeleteBlocks(containerSet, conf, 5, 3, 1);
 
     // Make sure chunks are created
-    Assert.assertEquals(15, chunksDir.listFiles().length);
+    Assert.assertEquals(15, getNumberOfChunksInContainers(containerSet));
 
     BlockDeletingServiceTestImpl service =
         new BlockDeletingServiceTestImpl(containerSet, 1000, conf);
@@ -399,13 +402,26 @@ public class TestBlockDeletingService {
       // number of containers = 5
       // each interval will at most runDeletingTasks 5 * 2 = 10 blocks
       deleteAndWait(service, 1);
-      Assert.assertEquals(5, chunksDir.listFiles().length);
+      Assert.assertEquals(5, getNumberOfChunksInContainers(containerSet));
 
       // There is only 5 blocks left to runDeletingTasks
       deleteAndWait(service, 2);
-      Assert.assertEquals(0, chunksDir.listFiles().length);
+      Assert.assertEquals(0, getNumberOfChunksInContainers(containerSet));
     } finally {
       service.shutdown();
     }
   }
+
+  private int getNumberOfChunksInContainers(ContainerSet containerSet) {
+    Iterator<Container> containerIterator = containerSet.getContainerIterator();
+    int numChunks = 0;
+    while (containerIterator.hasNext()) {
+      Container container = containerIterator.next();
+      File chunkDir = FileUtils.getFile(
+          ((KeyValueContainerData) container.getContainerData())
+              .getChunksPath());
+      numChunks += chunkDir.listFiles().length;
+    }
+    return numChunks;
+  }
 }




[49/50] hadoop git commit: HADOOP-15522. Deprecate Shell#ReadLink by using native java code. Contributed by Giovanni Matteo Fumarola.

Posted by in...@apache.org.
HADOOP-15522. Deprecate Shell#ReadLink by using native java code. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/866646eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/866646eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/866646eb

Branch: refs/heads/HADOOP-15461
Commit: 866646eb3bf15d101574d000c41915206e8db713
Parents: b8d2b09
Author: Inigo Goiri <in...@apache.org>
Authored: Mon Jun 11 13:14:34 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue Jul 24 18:30:47 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/FileUtil.java     | 21 +++++++++++---------
 .../main/java/org/apache/hadoop/util/Shell.java |  8 +++++++-
 2 files changed, 19 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/866646eb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 61cb8d2..f3b5d58 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -196,22 +196,25 @@ public class FileUtil {
    *         a symlink.
    */
   public static String readLink(File f) {
-    /* NB: Use readSymbolicLink in java.nio.file.Path once available. Could
-     * use getCanonicalPath in File to get the target of the symlink but that
-     * does not indicate if the given path refers to a symlink.
-     */
 
     if (f == null) {
       LOG.warn("Can not read a null symLink");
       return "";
     }
 
-    try {
-      return Shell.execCommand(
-          Shell.getReadlinkCommand(f.toString())).trim();
-    } catch (IOException x) {
-      return "";
+    if (Files.isSymbolicLink(f.toPath())) {
+      java.nio.file.Path p = null;
+      try {
+        p = Files.readSymbolicLink(f.toPath());
+      } catch (Exception e) {
+        LOG.warn("Exception while reading the symbolic link "
+            + f.getAbsolutePath() + ". Exception= " + e.getMessage());
+        return "";
+      }
+      return p.toAbsolutePath().toString();
     }
+    LOG.warn("The file " + f.getAbsolutePath() + " is not a symbolic link.");
+    return "";
   }
 
   /*
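
The contract after this change, as a small usage sketch (the path is hypothetical):
the link target is resolved in-process through java.nio instead of forking a
readlink/winutils command, and anything that is not a symlink yields "".

    // Sketch only; /tmp/example-link is a made-up path.
    File link = new File("/tmp/example-link");
    String target = FileUtil.readLink(link);
    // target is the absolute target path if link is a symlink, or "" if it is
    // not a symlink, does not exist, or the link cannot be read.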

http://git-wip-us.apache.org/repos/asf/hadoop/blob/866646eb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index e902af0..691df63 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -309,7 +309,13 @@ public abstract class Shell {
        : new String[] { "ln", "-s", target, link };
   }
 
-  /** Return a command to read the target of the a symbolic link. */
+  /**
+   * Return a command to read the target of the a symbolic link.
+   *
+   * Deprecated and likely to be deleted in the near future. Please use
+   * FileUtil.symlink().
+   */
+  @Deprecated
   public static String[] getReadlinkCommand(String link) {
     return WINDOWS ?
         new String[] { getWinUtilsPath(), "readlink", link }




[07/50] hadoop git commit: YARN-8501. Reduce complexity of RMWebServices getApps method. Contributed by Szilard Nemeth

Posted by in...@apache.org.
YARN-8501. Reduce complexity of RMWebServices getApps method.
           Contributed by Szilard Nemeth


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5836e0a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5836e0a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5836e0a4

Branch: refs/heads/HADOOP-15461
Commit: 5836e0a46bf9793e0a61bb8ec46536f4a67d38d7
Parents: ccf2db7
Author: Eric Yang <ey...@apache.org>
Authored: Thu Jul 19 12:30:38 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Thu Jul 19 12:30:38 2018 -0400

----------------------------------------------------------------------
 .../hadoop/yarn/server/webapp/WebServices.java  |   2 +-
 .../webapp/ApplicationsRequestBuilder.java      | 231 ++++++++
 .../resourcemanager/webapp/RMWebServices.java   | 145 +----
 .../webapp/TestApplicationsRequestBuilder.java  | 529 +++++++++++++++++++
 4 files changed, 777 insertions(+), 130 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5836e0a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
index 03b1055..5bb5448 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
@@ -392,7 +392,7 @@ public class WebServices {
     response.setContentType(null);
   }
 
-  protected static Set<String>
+  public static Set<String>
       parseQueries(Set<String> queries, boolean isState) {
     Set<String> params = new HashSet<String>();
     if (!queries.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5836e0a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java
new file mode 100644
index 0000000..876d044
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java
@@ -0,0 +1,231 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity
+        .CapacityScheduler;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.apache.hadoop.yarn.server.webapp.WebServices.parseQueries;
+
+public class ApplicationsRequestBuilder {
+
+  private Set<String> statesQuery = Sets.newHashSet();
+  private Set<String> users = Sets.newHashSetWithExpectedSize(1);
+  private Set<String> queues = Sets.newHashSetWithExpectedSize(1);
+  private String limit = null;
+  private Long limitNumber;
+
+  // set values suitable in case both of begin/end not specified
+  private long startedTimeBegin = 0;
+  private long startedTimeEnd = Long.MAX_VALUE;
+  private long finishTimeBegin = 0;
+  private long finishTimeEnd = Long.MAX_VALUE;
+  private Set<String> appTypes = Sets.newHashSet();
+  private Set<String> appTags = Sets.newHashSet();
+  private ResourceManager rm;
+
+  private ApplicationsRequestBuilder() {
+  }
+
+  public static ApplicationsRequestBuilder create() {
+    return new ApplicationsRequestBuilder();
+  }
+
+  public ApplicationsRequestBuilder withStateQuery(String stateQuery) {
+    // stateQuery is deprecated.
+    if (stateQuery != null && !stateQuery.isEmpty()) {
+      statesQuery.add(stateQuery);
+    }
+    return this;
+  }
+
+  public ApplicationsRequestBuilder withStatesQuery(
+      Set<String> statesQuery) {
+    if (statesQuery != null) {
+      this.statesQuery.addAll(statesQuery);
+    }
+    return this;
+  }
+
+  public ApplicationsRequestBuilder withUserQuery(String userQuery) {
+    if (userQuery != null && !userQuery.isEmpty()) {
+      users.add(userQuery);
+    }
+    return this;
+  }
+
+  public ApplicationsRequestBuilder withQueueQuery(ResourceManager rm,
+      String queueQuery) {
+    this.rm = rm;
+    if (queueQuery != null && !queueQuery.isEmpty()) {
+      queues.add(queueQuery);
+    }
+    return this;
+  }
+
+  public ApplicationsRequestBuilder withLimit(String limit) {
+    if (limit != null && !limit.isEmpty()) {
+      this.limit = limit;
+    }
+    return this;
+  }
+
+  public ApplicationsRequestBuilder withStartedTimeBegin(
+      String startedBegin) {
+    if (startedBegin != null && !startedBegin.isEmpty()) {
+      startedTimeBegin = parseLongValue(startedBegin, "startedTimeBegin");
+    }
+    return this;
+  }
+
+  public ApplicationsRequestBuilder withStartedTimeEnd(String startedEnd) {
+    if (startedEnd != null && !startedEnd.isEmpty()) {
+      startedTimeEnd = parseLongValue(startedEnd, "startedTimeEnd");
+    }
+    return this;
+  }
+
+  public ApplicationsRequestBuilder withFinishTimeBegin(String finishBegin) {
+    if (finishBegin != null && !finishBegin.isEmpty()) {
+      finishTimeBegin = parseLongValue(finishBegin, "finishedTimeBegin");
+    }
+    return this;
+  }
+
+  public ApplicationsRequestBuilder withFinishTimeEnd(String finishEnd) {
+    if (finishEnd != null && !finishEnd.isEmpty()) {
+      finishTimeEnd = parseLongValue(finishEnd, "finishedTimeEnd");
+    }
+    return this;
+  }
+
+  public ApplicationsRequestBuilder withApplicationTypes(
+      Set<String> applicationTypes) {
+    if (applicationTypes !=  null) {
+      appTypes = parseQueries(applicationTypes, false);
+    }
+    return this;
+  }
+
+  public ApplicationsRequestBuilder withApplicationTags(
+      Set<String> applicationTags) {
+    if (applicationTags != null) {
+      appTags = parseQueries(applicationTags, false);
+    }
+    return this;
+  }
+
+  private void validate() {
+    queues.forEach(q -> validateQueueExists(rm, q));
+    validateLimit();
+    validateStartTime();
+    validateFinishTime();
+  }
+
+  private void validateQueueExists(ResourceManager rm, String queueQuery) {
+    ResourceScheduler rs = rm.getResourceScheduler();
+    if (rs instanceof CapacityScheduler) {
+      CapacityScheduler cs = (CapacityScheduler) rs;
+      try {
+        cs.getQueueInfo(queueQuery, false, false);
+      } catch (IOException e) {
+        throw new BadRequestException(e.getMessage());
+      }
+    }
+  }
+
+  private void validateLimit() {
+    if (limit != null) {
+      limitNumber = parseLongValue(limit, "limit");
+      if (limitNumber <= 0) {
+        throw new BadRequestException("limit value must be greater then 0");
+      }
+    }
+  }
+
+  private long parseLongValue(String strValue, String queryName) {
+    try {
+      return Long.parseLong(strValue);
+    } catch (NumberFormatException e) {
+      throw new BadRequestException(queryName + " value must be a number!");
+    }
+  }
+
+  private void validateStartTime() {
+    if (startedTimeBegin < 0) {
+      throw new BadRequestException("startedTimeBegin must be greater than 0");
+    }
+    if (startedTimeEnd < 0) {
+      throw new BadRequestException("startedTimeEnd must be greater than 0");
+    }
+    if (startedTimeBegin > startedTimeEnd) {
+      throw new BadRequestException(
+          "startedTimeEnd must be greater than startTimeBegin");
+    }
+  }
+
+  private void validateFinishTime() {
+    if (finishTimeBegin < 0) {
+      throw new BadRequestException("finishTimeBegin must be greater than 0");
+    }
+    if (finishTimeEnd < 0) {
+      throw new BadRequestException("finishTimeEnd must be greater than 0");
+    }
+    if (finishTimeBegin > finishTimeEnd) {
+      throw new BadRequestException(
+          "finishTimeEnd must be greater than finishTimeBegin");
+    }
+  }
+
+  public GetApplicationsRequest build() {
+    validate();
+    GetApplicationsRequest request = GetApplicationsRequest.newInstance();
+
+    Set<String> appStates = parseQueries(statesQuery, true);
+    if (!appStates.isEmpty()) {
+      request.setApplicationStates(appStates);
+    }
+    if (!users.isEmpty()) {
+      request.setUsers(users);
+    }
+    if (!queues.isEmpty()) {
+      request.setQueues(queues);
+    }
+    if (limitNumber != null) {
+      request.setLimit(limitNumber);
+    }
+    request.setStartRange(startedTimeBegin, startedTimeEnd);
+    request.setFinishRange(finishTimeBegin, finishTimeEnd);
+
+    if (!appTypes.isEmpty()) {
+      request.setApplicationTypes(appTypes);
+    }
+    if (!appTags.isEmpty()) {
+      request.setApplicationTags(appTags);
+    }
+
+    return request;
+  }
+}
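
A minimal usage sketch of the new builder (the user, state and limit values below
are illustrative); the rewritten RMWebServices.getApps() further down chains the
full set of withXxx(...) calls in the same way:

    // Sketch only: parameters that are null or empty are simply skipped, and
    // build() validates the limit and the time ranges before creating the request.
    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
        .withUserQuery("alice")
        .withStatesQuery(Sets.newHashSet(YarnApplicationState.RUNNING.toString()))
        .withLimit("10")
        .build();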

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5836e0a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 15b58d7..4527a02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -482,7 +482,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       @QueryParam(RMWSConsts.FINAL_STATUS) String finalStatusQuery,
       @QueryParam(RMWSConsts.USER) String userQuery,
       @QueryParam(RMWSConsts.QUEUE) String queueQuery,
-      @QueryParam(RMWSConsts.LIMIT) String count,
+      @QueryParam(RMWSConsts.LIMIT) String limit,
       @QueryParam(RMWSConsts.STARTED_TIME_BEGIN) String startedBegin,
       @QueryParam(RMWSConsts.STARTED_TIME_END) String startedEnd,
       @QueryParam(RMWSConsts.FINISHED_TIME_BEGIN) String finishBegin,
@@ -493,135 +493,22 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
 
     initForReadableEndpoints();
 
-    boolean checkCount = false;
-    boolean checkStart = false;
-    boolean checkEnd = false;
-    boolean checkAppTypes = false;
-    boolean checkAppStates = false;
-    boolean checkAppTags = false;
-    long countNum = 0;
-
-    // set values suitable in case both of begin/end not specified
-    long sBegin = 0;
-    long sEnd = Long.MAX_VALUE;
-    long fBegin = 0;
-    long fEnd = Long.MAX_VALUE;
-
-    if (count != null && !count.isEmpty()) {
-      checkCount = true;
-      countNum = Long.parseLong(count);
-      if (countNum <= 0) {
-        throw new BadRequestException("limit value must be greater then 0");
-      }
-    }
-
-    if (startedBegin != null && !startedBegin.isEmpty()) {
-      checkStart = true;
-      sBegin = Long.parseLong(startedBegin);
-      if (sBegin < 0) {
-        throw new BadRequestException(
-            "startedTimeBegin must be greater than 0");
-      }
-    }
-    if (startedEnd != null && !startedEnd.isEmpty()) {
-      checkStart = true;
-      sEnd = Long.parseLong(startedEnd);
-      if (sEnd < 0) {
-        throw new BadRequestException("startedTimeEnd must be greater than 0");
-      }
-    }
-    if (sBegin > sEnd) {
-      throw new BadRequestException(
-          "startedTimeEnd must be greater than startTimeBegin");
-    }
-
-    if (finishBegin != null && !finishBegin.isEmpty()) {
-      checkEnd = true;
-      fBegin = Long.parseLong(finishBegin);
-      if (fBegin < 0) {
-        throw new BadRequestException("finishTimeBegin must be greater than 0");
-      }
-    }
-    if (finishEnd != null && !finishEnd.isEmpty()) {
-      checkEnd = true;
-      fEnd = Long.parseLong(finishEnd);
-      if (fEnd < 0) {
-        throw new BadRequestException("finishTimeEnd must be greater than 0");
-      }
-    }
-    if (fBegin > fEnd) {
-      throw new BadRequestException(
-          "finishTimeEnd must be greater than finishTimeBegin");
-    }
-
-    Set<String> appTypes = parseQueries(applicationTypes, false);
-    if (!appTypes.isEmpty()) {
-      checkAppTypes = true;
-    }
-
-    Set<String> appTags = parseQueries(applicationTags, false);
-    if (!appTags.isEmpty()) {
-      checkAppTags = true;
-    }
-
-    // stateQuery is deprecated.
-    if (stateQuery != null && !stateQuery.isEmpty()) {
-      statesQuery.add(stateQuery);
-    }
-    Set<String> appStates = parseQueries(statesQuery, true);
-    if (!appStates.isEmpty()) {
-      checkAppStates = true;
-    }
-
-    GetApplicationsRequest request = GetApplicationsRequest.newInstance();
-
-    if (checkStart) {
-      request.setStartRange(sBegin, sEnd);
-    }
-
-    if (checkEnd) {
-      request.setFinishRange(fBegin, fEnd);
-    }
-
-    if (checkCount) {
-      request.setLimit(countNum);
-    }
-
-    if (checkAppTypes) {
-      request.setApplicationTypes(appTypes);
-    }
-
-    if (checkAppTags) {
-      request.setApplicationTags(appTags);
-    }
-
-    if (checkAppStates) {
-      request.setApplicationStates(appStates);
-    }
-
-    if (queueQuery != null && !queueQuery.isEmpty()) {
-      ResourceScheduler rs = rm.getResourceScheduler();
-      if (rs instanceof CapacityScheduler) {
-        CapacityScheduler cs = (CapacityScheduler) rs;
-        // validate queue exists
-        try {
-          cs.getQueueInfo(queueQuery, false, false);
-        } catch (IOException e) {
-          throw new BadRequestException(e.getMessage());
-        }
-      }
-      Set<String> queues = new HashSet<String>(1);
-      queues.add(queueQuery);
-      request.setQueues(queues);
-    }
-
-    if (userQuery != null && !userQuery.isEmpty()) {
-      Set<String> users = new HashSet<String>(1);
-      users.add(userQuery);
-      request.setUsers(users);
-    }
+    GetApplicationsRequest request =
+            ApplicationsRequestBuilder.create()
+                    .withStateQuery(stateQuery)
+                    .withStatesQuery(statesQuery)
+                    .withUserQuery(userQuery)
+                    .withQueueQuery(rm, queueQuery)
+                    .withLimit(limit)
+                    .withStartedTimeBegin(startedBegin)
+                    .withStartedTimeEnd(startedEnd)
+                    .withFinishTimeBegin(finishBegin)
+                    .withFinishTimeEnd(finishEnd)
+                    .withApplicationTypes(applicationTypes)
+                    .withApplicationTags(applicationTags)
+            .build();
 
-    List<ApplicationReport> appReports = null;
+    List<ApplicationReport> appReports;
     try {
       appReports = rm.getClientRMService().getApplications(request)
           .getApplicationList();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5836e0a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestApplicationsRequestBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestApplicationsRequestBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestApplicationsRequestBuilder.java
new file mode 100644
index 0000000..7c9b711
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestApplicationsRequestBuilder.java
@@ -0,0 +1,529 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.apache.hadoop.yarn.server.webapp.WebServices.parseQueries;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TestApplicationsRequestBuilder {
+
+  private GetApplicationsRequest getDefaultRequest() {
+    GetApplicationsRequest req = GetApplicationsRequest.newInstance();
+    req.setStartRange(0, Long.MAX_VALUE);
+    req.setFinishRange(0, Long.MAX_VALUE);
+    return req;
+  }
+
+  @Test
+  public void testDefaultRequest() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithNullStateQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withStateQuery(null).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithEmptyStateQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withStateQuery("").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithInvalidStateQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withStateQuery("invalidState").build();
+  }
+
+  @Test
+  public void testRequestWithValidStateQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withStateQuery(YarnApplicationState.NEW_SAVING.toString()).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    Set<String> appStates =
+        Sets.newHashSet(YarnApplicationState.NEW_SAVING.toString());
+    Set<String> appStatesLowerCase = parseQueries(appStates, true);
+    expectedRequest.setApplicationStates(appStatesLowerCase);
+
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithEmptyStateQueries() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withStatesQuery(Sets.newHashSet()).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithInvalidStateQueries() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withStatesQuery(Sets.newHashSet("a1", "a2", "")).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithNullStateQueries() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withStatesQuery(null).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithValidStateQueries() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withStatesQuery(
+            Sets.newHashSet(YarnApplicationState.NEW_SAVING.toString(),
+                YarnApplicationState.NEW.toString()))
+        .build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    Set<String> appStates =
+        Sets.newHashSet(YarnApplicationState.NEW_SAVING.toString(),
+            YarnApplicationState.NEW.toString());
+    Set<String> appStatesLowerCase = parseQueries(appStates, true);
+    expectedRequest.setApplicationStates(appStatesLowerCase);
+
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithNullUserQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withUserQuery(null).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithEmptyUserQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withUserQuery("").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithUserQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withUserQuery("user1").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    expectedRequest.setUsers(Sets.newHashSet("user1"));
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithNullQueueQuery() {
+    ResourceManager rm = mock(ResourceManager.class);
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withQueueQuery(rm, null).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithEmptyQueueQuery() {
+    ResourceManager rm = mock(ResourceManager.class);
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withQueueQuery(rm, "").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithQueueQueryExistingQueue() {
+    ResourceManager rm = mock(ResourceManager.class);
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withQueueQuery(rm, "queue1").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    expectedRequest.setQueues(Sets.newHashSet("queue1"));
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithQueueQueryNotExistingQueue() throws IOException {
+    CapacityScheduler cs = mock(CapacityScheduler.class);
+    when(cs.getQueueInfo(eq("queue1"), anyBoolean(), anyBoolean()))
+        .thenThrow(new IOException());
+    ResourceManager rm = mock(ResourceManager.class);
+    when(rm.getResourceScheduler()).thenReturn(cs);
+
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withQueueQuery(rm, "queue1").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    expectedRequest.setQueues(Sets.newHashSet("queue1"));
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithNullLimitQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withLimit(null).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithEmptyLimitQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withLimit("").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithInvalidLimitQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withLimit("bla").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithInvalidNegativeLimitQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withLimit("-10").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithValidLimitQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withLimit("999").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    expectedRequest.setLimit(999L);
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithNullStartedTimeBeginQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withStartedTimeBegin(null).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithEmptyStartedTimeBeginQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withStartedTimeBegin("").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithInvalidStartedTimeBeginQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withStartedTimeBegin("bla").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithInvalidNegativeStartedTimeBeginQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withStartedTimeBegin("-1").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithValidStartedTimeBeginQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withStartedTimeBegin("999").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    expectedRequest.setStartRange(999L, Long.MAX_VALUE);
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithNullStartedTimeEndQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withStartedTimeEnd(null).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithEmptywithStartedTimeEndQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withStartedTimeEnd("").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithInvalidStartedTimeEndQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withStartedTimeEnd("bla").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithInvalidNegativeStartedTimeEndQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withStartedTimeEnd("-1").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithValidStartedTimeEndQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withStartedTimeEnd("999").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    expectedRequest.setStartRange(0L, 999L);
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithNullFinishedTimeBeginQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withFinishTimeBegin(null).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithEmptyFinishedTimeBeginQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withFinishTimeBegin("").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithInvalidFinishedTimeBeginQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withFinishTimeBegin("bla").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithInvalidNegativeFinishedTimeBeginQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withFinishTimeBegin("-1").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithValidFinishedTimeBeginQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withFinishTimeBegin("999").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    expectedRequest.setFinishRange(999L, Long.MAX_VALUE);
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithNullFinishedTimeEndQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withFinishTimeEnd(null).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithEmptyFinishTimeEndQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withFinishTimeEnd("").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithInvalidFinishTimeEndQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withFinishTimeEnd("bla").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithInvalidNegativeFinishedTimeEndQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withFinishTimeEnd("-1").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithValidFinishTimeEndQuery() {
+    GetApplicationsRequest request =
+        ApplicationsRequestBuilder.create().withFinishTimeEnd("999").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    expectedRequest.setFinishRange(0L, 999L);
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithValidStartTimeRangeQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withStartedTimeBegin("1000").withStartedTimeEnd("2000").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    expectedRequest.setStartRange(1000L, 2000L);
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithInvalidStartTimeRangeQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withStartedTimeBegin("2000").withStartedTimeEnd("1000").build();
+  }
+
+  @Test
+  public void testRequestWithValidFinishTimeRangeQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withFinishTimeBegin("1000").withFinishTimeEnd("2000").build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    expectedRequest.setFinishRange(1000L, 2000L);
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test(expected = BadRequestException.class)
+  public void testRequestWithInvalidFinishTimeRangeQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withFinishTimeBegin("2000").withFinishTimeEnd("1000").build();
+  }
+
+  @Test
+  public void testRequestWithNullApplicationTypesQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withApplicationTypes(null).build();
+  }
+
+  @Test
+  public void testRequestWithEmptyApplicationTypesQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withApplicationTypes(Sets.newHashSet()).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    expectedRequest.setApplicationTypes(Sets.newHashSet());
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithValidApplicationTypesQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withApplicationTypes(Sets.newHashSet("type1")).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    expectedRequest.setApplicationTypes(Sets.newHashSet("type1"));
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithNullApplicationTagsQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withApplicationTags(null).build();
+  }
+
+  @Test
+  public void testRequestWithEmptyApplicationTagsQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withApplicationTags(Sets.newHashSet()).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    expectedRequest.setApplicationTags(Sets.newHashSet());
+    assertEquals(expectedRequest, request);
+  }
+
+  @Test
+  public void testRequestWithValidApplicationTagsQuery() {
+    GetApplicationsRequest request = ApplicationsRequestBuilder.create()
+        .withApplicationTags(Sets.newHashSet("tag1")).build();
+
+    GetApplicationsRequest expectedRequest = getDefaultRequest();
+    expectedRequest.setApplicationTags(Sets.newHashSet("tag1"));
+    assertEquals(expectedRequest, request);
+  }
+}
\ No newline at end of file




[30/50] hadoop git commit: HDDS-181. CloseContainer should commit all pending open Keys on a datanode. Contributed by Shashikant Banerjee.

Posted by in...@apache.org.
HDDS-181. CloseContainer should commit all pending open Keys on a datanode. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbe2f622
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbe2f622
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbe2f622

Branch: refs/heads/HADOOP-15461
Commit: bbe2f6225ea500651de04c064f7b847be18e5b66
Parents: 9fa9e30
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Mon Jul 23 09:12:47 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Mon Jul 23 09:13:03 2018 +0530

----------------------------------------------------------------------
 .../ozone/container/common/helpers/KeyData.java |  20 +-
 .../common/impl/OpenContainerBlockMap.java      | 167 ++++++++++++
 .../container/keyvalue/KeyValueHandler.java     |  69 ++++-
 .../common/impl/TestCloseContainerHandler.java  | 260 +++++++++++++++++++
 4 files changed, 504 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
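For readers skimming the diff below, here is a minimal, self-contained sketch of the bookkeeping this change adds on the datanode: write-chunk requests record pending chunks in an in-memory per-container map, putKey commits a block and drops it from the map, and closeContainer commits whatever is still pending before the container is marked closed. The class and method names in the sketch (OpenBlockTracker, blockCommitted, the chunk strings) are illustrative stand-ins, not the patch's actual OpenContainerBlockMap/KeyValueHandler code.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative stand-in for OpenContainerBlockMap: tracks chunks of blocks that
// have been written but whose block metadata has not been committed yet.
class OpenBlockTracker {
  // containerId -> (localBlockId -> chunks written so far)
  private final Map<Long, Map<Long, List<String>>> open = new ConcurrentHashMap<>();

  void addChunk(long containerId, long blockId, String chunk) {
    open.computeIfAbsent(containerId, c -> new LinkedHashMap<>())
        .computeIfAbsent(blockId, b -> new ArrayList<>())
        .add(chunk);
  }

  void removeChunk(long containerId, long blockId, String chunk) {
    Map<Long, List<String>> blocks = open.get(containerId);
    if (blocks != null && blocks.containsKey(blockId)) {
      blocks.get(blockId).remove(chunk);
    }
  }

  // Called from the putKey path: the block is committed, so stop tracking it.
  void blockCommitted(long containerId, long blockId) {
    Map<Long, List<String>> blocks = open.get(containerId);
    if (blocks != null) {
      blocks.remove(blockId);
      if (blocks.isEmpty()) {
        open.remove(containerId);
      }
    }
  }

  // Called from the closeContainer path: commit whatever is still pending.
  void closeContainer(long containerId) {
    Map<Long, List<String>> blocks = open.remove(containerId);
    if (blocks != null) {
      blocks.forEach((blockId, chunks) ->
          System.out.println("commit block " + blockId + " with chunks " + chunks));
    }
  }
}

public class CloseContainerSketch {
  public static void main(String[] args) {
    OpenBlockTracker tracker = new OpenBlockTracker();
    tracker.addChunk(1L, 100L, "chunk_0");   // writeChunk
    tracker.addChunk(1L, 100L, "chunk_1");   // writeChunk
    tracker.addChunk(1L, 101L, "chunk_0");   // second, still-open block
    tracker.blockCommitted(1L, 100L);        // putKey commits block 100
    tracker.closeContainer(1L);              // block 101 is auto-committed here
  }
}

The real handler also synchronizes these updates and clears the map entry when a container is deleted; the sketch only shows the commit-on-close bookkeeping.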


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbe2f622/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
index 129e4a8..b63332f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
@@ -25,6 +25,7 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
+import java.util.ArrayList;
 
 /**
  * Helper class to convert Protobuf to Java classes.
@@ -131,7 +132,25 @@ public class KeyData {
   }
 
   /**
+   * Adds chunkInfo to the list.
+   */
+  public void addChunk(ContainerProtos.ChunkInfo chunkInfo) {
+    if (chunks == null) {
+      chunks = new ArrayList<>();
+    }
+    chunks.add(chunkInfo);
+  }
+
+  /**
+   * Removes the chunk.
+   */
+  public void removeChunk(ContainerProtos.ChunkInfo chunkInfo) {
+    chunks.remove(chunkInfo);
+  }
+
+  /**
    * Returns container ID.
+   *
    * @return long.
    */
   public long getContainerID() {
@@ -170,5 +189,4 @@ public class KeyData {
   public long getSize() {
     return chunks.parallelStream().mapToLong(e->e.getLen()).sum();
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbe2f622/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
new file mode 100644
index 0000000..ab5f861
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.impl;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.Collectors;
+
+/**
+ * This class maintains the list of open keys per container. When a
+ * closeContainer command arrives, it should autocommit all open keys of an
+ * open container before marking the container as closed.
+ */
+public class OpenContainerBlockMap {
+
+  /**
+   * TODO : We may construct the openBlockMap by reading the Block Layout
+   * for each block inside a container listing all chunk files and reading the
+   * sizes. This will help to recreate the openKeys Map once the DataNode
+   * restarts.
+   *
+   * For now, we will track all open blocks of a container in the blockMap.
+   */
+  private final ConcurrentHashMap<Long, HashMap<Long, KeyData>>
+      openContainerBlockMap;
+
+  /**
+   * Constructs OpenContainerBlockMap.
+   */
+  public OpenContainerBlockMap() {
+     openContainerBlockMap = new ConcurrentHashMap<>();
+  }
+  /**
+   * Removes the Container matching with specified containerId.
+   * @param containerId containerId
+   */
+  public void removeContainer(long containerId) {
+    Preconditions
+        .checkState(containerId >= 0, "Container Id cannot be negative.");
+    openContainerBlockMap.computeIfPresent(containerId, (k, v) -> null);
+  }
+
+  /**
+   * Updates the chunkInfo list when a chunk is added or deleted.
+   * @param blockID id of the block.
+   * @param info - Chunk Info
+   * @param remove if true, removes the chunk from the chunkInfo list;
+   *               otherwise appends it to the chunkInfo list
+   * @throws IOException
+   */
+  public synchronized void updateOpenKeyMap(BlockID blockID,
+      ContainerProtos.ChunkInfo info, boolean remove) throws IOException {
+    if (remove) {
+      deleteChunkFromMap(blockID, info);
+    } else {
+      addChunkToMap(blockID, info);
+    }
+  }
+
+  private KeyData getKeyData(ContainerProtos.ChunkInfo info, BlockID blockID)
+      throws IOException {
+    KeyData keyData = new KeyData(blockID);
+    keyData.addMetadata("TYPE", "KEY");
+    keyData.addChunk(info);
+    return keyData;
+  }
+
+  private void addChunkToMap(BlockID blockID, ContainerProtos.ChunkInfo info)
+      throws IOException {
+    Preconditions.checkNotNull(info);
+    long containerId = blockID.getContainerID();
+    long localID = blockID.getLocalID();
+
+    KeyData keyData = openContainerBlockMap.computeIfAbsent(containerId,
+        emptyMap -> new LinkedHashMap<Long, KeyData>())
+        .putIfAbsent(localID, getKeyData(info, blockID));
+    // KeyData != null means the block already exists
+    if (keyData != null) {
+      HashMap<Long, KeyData> keyDataSet =
+          openContainerBlockMap.get(containerId);
+      keyDataSet.putIfAbsent(blockID.getLocalID(), getKeyData(info, blockID));
+      keyDataSet.computeIfPresent(blockID.getLocalID(), (key, value) -> {
+        value.addChunk(info);
+        return value;
+      });
+    }
+  }
+
+  /**
+   * Removes the chunk from the chunkInfo list for the given block.
+   * @param blockID id of the block
+   * @param chunkInfo chunk info.
+   */
+  private synchronized void deleteChunkFromMap(BlockID blockID,
+      ContainerProtos.ChunkInfo chunkInfo) {
+    Preconditions.checkNotNull(chunkInfo);
+    Preconditions.checkNotNull(blockID);
+    HashMap<Long, KeyData> keyDataMap =
+        openContainerBlockMap.get(blockID.getContainerID());
+    if (keyDataMap != null) {
+      long localId = blockID.getLocalID();
+      KeyData keyData = keyDataMap.get(localId);
+      if (keyData != null) {
+        keyData.removeChunk(chunkInfo);
+      }
+    }
+  }
+
+  /**
+   * Returns the list of open keys (blocks) for the given container.
+   * @param containerId container id
+   * @return List of open Keys(blocks)
+   */
+  public List<KeyData> getOpenKeys(long containerId) {
+    HashMap<Long, KeyData> keyDataHashMap =
+        openContainerBlockMap.get(containerId);
+    return keyDataHashMap == null ? null :
+        keyDataHashMap.values().stream().collect(Collectors.toList());
+  }
+
+  /**
+   * Removes the block from the block map.
+   * @param blockID id of the block to remove
+   */
+  public synchronized void removeFromKeyMap(BlockID blockID) {
+    Preconditions.checkNotNull(blockID);
+    HashMap<Long, KeyData> keyDataMap =
+        openContainerBlockMap.get(blockID.getContainerID());
+    if (keyDataMap != null) {
+      keyDataMap.remove(blockID.getLocalID());
+      if (keyDataMap.size() == 0) {
+        removeContainer(blockID.getContainerID());
+      }
+    }
+  }
+
+  @VisibleForTesting
+  public ConcurrentHashMap<Long,
+      HashMap<Long, KeyData>> getContainerOpenKeyMap() {
+    return openContainerBlockMap;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbe2f622/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 84b3644..9aa3df7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
+import org.apache.hadoop.ozone.container.common.impl.OpenContainerBlockMap;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.SmallFileUtils;
@@ -117,7 +118,7 @@ public class KeyValueHandler extends Handler {
   private VolumeChoosingPolicy volumeChoosingPolicy;
   private final int maxContainerSizeGB;
   private final AutoCloseableLock handlerLock;
-
+  private final OpenContainerBlockMap openContainerBlockMap;
 
   public KeyValueHandler(Configuration config, ContainerSet contSet,
       VolumeSet volSet, ContainerMetrics metrics) {
@@ -145,6 +146,15 @@ public class KeyValueHandler extends Handler {
     // this handler lock is used for synchronizing createContainer Requests,
     // so using a fair lock here.
     handlerLock = new AutoCloseableLock(new ReentrantLock(true));
+    openContainerBlockMap = new OpenContainerBlockMap();
+  }
+
+  /**
+   * Returns OpenContainerBlockMap instance
+   * @return OpenContainerBlockMap
+   */
+  public OpenContainerBlockMap getOpenContainerBlockMap() {
+    return openContainerBlockMap;
   }
 
   @Override
@@ -333,8 +343,9 @@ public class KeyValueHandler extends Handler {
             "Container cannot be deleted because it is not empty.",
             ContainerProtos.Result.ERROR_CONTAINER_NOT_EMPTY);
       } else {
-        containerSet.removeContainer(
-            kvContainer.getContainerData().getContainerID());
+        long containerId = kvContainer.getContainerData().getContainerID();
+        containerSet.removeContainer(containerId);
+        openContainerBlockMap.removeContainer(containerId);
         // Release the lock first.
         // Avoid holding write locks for disk operations
         kvContainer.writeUnlock();
@@ -366,9 +377,21 @@ public class KeyValueHandler extends Handler {
     try {
       checkContainerOpen(kvContainer);
 
+      // remove the container from the open block map once all the blocks
+      // have been committed and the container is closed
+      kvContainer.getContainerData()
+          .setState(ContainerProtos.ContainerLifeCycleState.CLOSING);
+      commitPendingKeys(kvContainer);
       kvContainer.close();
+      // make sure the container's open keys are removed from the BlockMap
+      openContainerBlockMap.removeContainer(
+          request.getCloseContainer().getContainerID());
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, request);
+    } catch (IOException ex) {
+      return ContainerUtils.logAndReturnError(LOG,
+          new StorageContainerException("Close Container failed", ex,
+              IO_EXCEPTION), request);
     }
 
     return ContainerUtils.getSuccessResponse(request);
@@ -391,10 +414,8 @@ public class KeyValueHandler extends Handler {
 
       KeyData keyData = KeyData.getFromProtoBuf(
           request.getPutKey().getKeyData());
-      Preconditions.checkNotNull(keyData);
-
-      keyManager.putKey(kvContainer, keyData);
       long numBytes = keyData.getProtoBufMessage().toByteArray().length;
+      commitKey(keyData, kvContainer);
       metrics.incContainerBytesStats(Type.PutKey, numBytes);
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, request);
@@ -407,6 +428,25 @@ public class KeyValueHandler extends Handler {
     return KeyUtils.getKeyResponseSuccess(request);
   }
 
+  private void commitPendingKeys(KeyValueContainer kvContainer)
+      throws IOException {
+    long containerId = kvContainer.getContainerData().getContainerID();
+    List<KeyData> pendingKeys =
+        this.openContainerBlockMap.getOpenKeys(containerId);
+    if (pendingKeys != null) {
+      for (KeyData keyData : pendingKeys) {
+        commitKey(keyData, kvContainer);
+      }
+    }
+  }
+
+  private void commitKey(KeyData keyData, KeyValueContainer kvContainer)
+      throws IOException {
+    Preconditions.checkNotNull(keyData);
+    keyManager.putKey(kvContainer, keyData);
+    // update the open key map in the containerManager
+    this.openContainerBlockMap.removeFromKeyMap(keyData.getBlockID());
+  }
   /**
    * Handle Get Key operation. Calls KeyManager to process the request.
    */
@@ -519,11 +559,13 @@ public class KeyValueHandler extends Handler {
 
       BlockID blockID = BlockID.getFromProtobuf(
           request.getDeleteChunk().getBlockID());
-      ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(request.getDeleteChunk()
-          .getChunkData());
+      ContainerProtos.ChunkInfo chunkInfoProto = request.getDeleteChunk()
+          .getChunkData();
+      ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
       Preconditions.checkNotNull(chunkInfo);
 
       chunkManager.deleteChunk(kvContainer, blockID, chunkInfo);
+      openContainerBlockMap.updateOpenKeyMap(blockID, chunkInfoProto, true);
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, request);
     } catch (IOException ex) {
@@ -552,8 +594,9 @@ public class KeyValueHandler extends Handler {
 
       BlockID blockID = BlockID.getFromProtobuf(
           request.getWriteChunk().getBlockID());
-      ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(request.getWriteChunk()
-          .getChunkData());
+      ContainerProtos.ChunkInfo chunkInfoProto =
+          request.getWriteChunk().getChunkData();
+      ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
       Preconditions.checkNotNull(chunkInfo);
 
       byte[] data = null;
@@ -570,6 +613,9 @@ public class KeyValueHandler extends Handler {
           request.getWriteChunk().getStage() == Stage.COMBINED) {
         metrics.incContainerBytesStats(Type.WriteChunk, request.getWriteChunk()
             .getChunkData().getLen());
+        // the openContainerBlockMap should be updated only while writing data,
+        // not during the COMMIT_STAGE of handling a write chunk request.
+        openContainerBlockMap.updateOpenKeyMap(blockID, chunkInfoProto, false);
       }
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, request);
@@ -610,8 +656,9 @@ public class KeyValueHandler extends Handler {
       ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(
           putSmallFileReq.getChunkInfo());
       Preconditions.checkNotNull(chunkInfo);
-
       byte[] data = putSmallFileReq.getData().toByteArray();
+      // chunks will be committed as a part of handling putSmallFile
+      // here. There is no need to maintain this info in openContainerBlockMap.
       chunkManager.writeChunk(
           kvContainer, blockID, chunkInfo, data, Stage.COMBINED);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbe2f622/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
new file mode 100644
index 0000000..3ab593e
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.impl;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.Assert;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+import java.util.LinkedList;
+import static org.apache.hadoop.ozone.container.ContainerTestHelper
+    .createSingleNodePipeline;
+import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk;
+import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData;
+import static org.apache.hadoop.ozone.container.ContainerTestHelper
+    .setDataChecksum;
+
+/**
+ * Simple tests to verify the closeContainer handler on the datanode.
+ */
+public class TestCloseContainerHandler {
+
+  @Rule
+  public TestRule timeout = new Timeout(300000);
+
+  private static Configuration conf;
+  private static HddsDispatcher dispatcher;
+  private static ContainerSet containerSet;
+  private static VolumeSet volumeSet;
+  private static KeyValueHandler handler;
+  private static OpenContainerBlockMap openContainerBlockMap;
+
+  private final static String DATANODE_UUID = UUID.randomUUID().toString();
+
+  private static final String baseDir = MiniDFSCluster.getBaseDirectory();
+  private static final String volume1 = baseDir + "disk1";
+  private static final String volume2 = baseDir + "disk2";
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    conf = new Configuration();
+    String dataDirKey = volume1 + "," + volume2;
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
+    containerSet = new ContainerSet();
+    DatanodeDetails datanodeDetails =
+        DatanodeDetails.newBuilder().setUuid(DATANODE_UUID)
+            .setHostName("localhost").setIpAddress("127.0.0.1").build();
+    volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
+
+    dispatcher = new HddsDispatcher(conf, containerSet, volumeSet);
+    handler = (KeyValueHandler) dispatcher
+        .getHandler(ContainerProtos.ContainerType.KeyValueContainer);
+    openContainerBlockMap = handler.getOpenContainerBlockMap();
+    dispatcher.setScmId(UUID.randomUUID().toString());
+  }
+
+  @AfterClass
+  public static void shutdown() throws IOException {
+    // Delete the hdds volume root dir
+    List<HddsVolume> volumes = new ArrayList<>();
+    volumes.addAll(volumeSet.getVolumesList());
+    volumes.addAll(volumeSet.getFailedVolumesList());
+
+    for (HddsVolume volume : volumes) {
+      FileUtils.deleteDirectory(volume.getHddsRootDir());
+    }
+    volumeSet.shutdown();
+  }
+
+  private long createContainer() {
+    long testContainerId = ContainerTestHelper.getTestContainerID();
+    ContainerProtos.CreateContainerRequestProto createReq =
+        ContainerProtos.CreateContainerRequestProto.newBuilder()
+            .setContainerID(testContainerId)
+            .build();
+
+    ContainerProtos.ContainerCommandRequestProto request =
+        ContainerProtos.ContainerCommandRequestProto.newBuilder()
+            .setCmdType(ContainerProtos.Type.CreateContainer)
+            .setDatanodeUuid(DATANODE_UUID)
+            .setCreateContainer(createReq)
+            .build();
+
+    dispatcher.dispatch(request);
+    return testContainerId;
+  }
+
+  private List<ChunkInfo> writeChunkBuilder(BlockID blockID, Pipeline pipeline,
+      int chunkCount)
+      throws IOException, NoSuchAlgorithmException {
+    final int datalen = 1024;
+    long testContainerID = blockID.getContainerID();
+    List<ChunkInfo> chunkList = new LinkedList<>();
+    for (int x = 0; x < chunkCount; x++) {
+      ChunkInfo info = getChunk(blockID.getLocalID(), x, datalen * x, datalen);
+      byte[] data = getData(datalen);
+      setDataChecksum(info, data);
+      ContainerProtos.WriteChunkRequestProto.Builder writeRequest =
+          ContainerProtos.WriteChunkRequestProto.newBuilder();
+      writeRequest.setBlockID(blockID.getDatanodeBlockIDProtobuf());
+      writeRequest.setChunkData(info.getProtoBufMessage());
+      writeRequest.setData(ByteString.copyFrom(data));
+      writeRequest.setStage(ContainerProtos.Stage.COMBINED);
+      ContainerProtos.ContainerCommandRequestProto.Builder request =
+          ContainerProtos.ContainerCommandRequestProto.newBuilder();
+      request.setCmdType(ContainerProtos.Type.WriteChunk);
+      request.setWriteChunk(writeRequest);
+      request.setTraceID(UUID.randomUUID().toString());
+      request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+      dispatcher.dispatch(request.build());
+      chunkList.add(info);
+    }
+    return chunkList;
+  }
+
+  @Test
+  public void testPutKeyWithMultipleChunks()
+      throws IOException, NoSuchAlgorithmException {
+    long testContainerID = createContainer();
+    Assert.assertNotNull(containerSet.getContainer(testContainerID));
+    BlockID blockID = ContainerTestHelper.
+        getTestBlockID(testContainerID);
+    Pipeline pipeline = createSingleNodePipeline();
+    List<ChunkInfo> chunkList = writeChunkBuilder(blockID, pipeline, 3);
+    // the key should exist in the map
+    Assert.assertTrue(
+        openContainerBlockMap.getContainerOpenKeyMap().get(testContainerID)
+            .containsKey(blockID.getLocalID()));
+    KeyData keyData = new KeyData(blockID);
+    List<ContainerProtos.ChunkInfo> chunkProtoList = new LinkedList<>();
+    for (ChunkInfo i : chunkList) {
+      chunkProtoList.add(i.getProtoBufMessage());
+    }
+    keyData.setChunks(chunkProtoList);
+    ContainerProtos.PutKeyRequestProto.Builder putKeyRequestProto =
+        ContainerProtos.PutKeyRequestProto.newBuilder();
+    putKeyRequestProto.setKeyData(keyData.getProtoBufMessage());
+    ContainerProtos.ContainerCommandRequestProto.Builder request =
+        ContainerProtos.ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(ContainerProtos.Type.PutKey);
+    request.setPutKey(putKeyRequestProto);
+    request.setTraceID(UUID.randomUUID().toString());
+    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    dispatcher.dispatch(request.build());
+
+    // the open key should be removed from the map
+    Assert.assertNull(
+        openContainerBlockMap.getContainerOpenKeyMap().get(testContainerID));
+  }
+
+  @Test
+  public void testDeleteChunk() throws Exception {
+    long testContainerID = createContainer();
+    Assert.assertNotNull(containerSet.getContainer(testContainerID));
+    BlockID blockID = ContainerTestHelper.
+        getTestBlockID(testContainerID);
+    Pipeline pipeline = createSingleNodePipeline();
+    List<ChunkInfo> chunkList = writeChunkBuilder(blockID, pipeline, 3);
+    // the key should exist in the map
+    Assert.assertTrue(
+        openContainerBlockMap.getContainerOpenKeyMap().get(testContainerID)
+            .containsKey(blockID.getLocalID()));
+    Assert.assertTrue(
+        openContainerBlockMap.getContainerOpenKeyMap().get(testContainerID)
+            .get(blockID.getLocalID()).getChunks().size() == 3);
+    ContainerProtos.DeleteChunkRequestProto.Builder deleteChunkProto =
+        ContainerProtos.DeleteChunkRequestProto.newBuilder();
+    deleteChunkProto.setBlockID(blockID.getDatanodeBlockIDProtobuf());
+    deleteChunkProto.setChunkData(chunkList.get(0).getProtoBufMessage());
+    ContainerProtos.WriteChunkRequestProto.Builder writeRequest =
+        ContainerProtos.WriteChunkRequestProto.newBuilder();
+    writeRequest.setBlockID(blockID.getDatanodeBlockIDProtobuf());
+    writeRequest.setChunkData(chunkList.get(0).getProtoBufMessage());
+    ContainerProtos.ContainerCommandRequestProto.Builder request =
+        ContainerProtos.ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(ContainerProtos.Type.DeleteChunk);
+    request.setDeleteChunk(deleteChunkProto);
+    request.setWriteChunk(writeRequest);
+    request.setTraceID(UUID.randomUUID().toString());
+    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    dispatcher.dispatch(request.build());
+    Assert.assertTrue(
+        openContainerBlockMap.getContainerOpenKeyMap().get(testContainerID)
+            .get(blockID.getLocalID()).getChunks().size() == 2);
+
+  }
+
+  @Test
+  public void testCloseContainer() throws Exception {
+    long testContainerID = createContainer();
+    Assert.assertNotNull(containerSet.getContainer(testContainerID));
+    BlockID blockID = ContainerTestHelper.
+        getTestBlockID(testContainerID);
+    Pipeline pipeline = createSingleNodePipeline();
+    List<ChunkInfo> chunkList = writeChunkBuilder(blockID, pipeline, 3);
+
+    Container container = containerSet.getContainer(testContainerID);
+    KeyData keyData = openContainerBlockMap.getContainerOpenKeyMap().
+        get(testContainerID).get(blockID.getLocalID());
+    // the key should exist in the map
+    Assert.assertTrue(
+        openContainerBlockMap.getContainerOpenKeyMap().get(testContainerID)
+            .containsKey(blockID.getLocalID()));
+    Assert.assertTrue(
+        keyData.getChunks().size() == chunkList.size());
+    ContainerProtos.CloseContainerRequestProto.Builder closeContainerProto =
+        ContainerProtos.CloseContainerRequestProto.newBuilder();
+    closeContainerProto.setContainerID(blockID.getContainerID());
+    ContainerProtos.ContainerCommandRequestProto.Builder request =
+        ContainerProtos.ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(ContainerProtos.Type.CloseContainer);
+    request.setCloseContainer(closeContainerProto);
+    request.setTraceID(UUID.randomUUID().toString());
+    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    dispatcher.dispatch(request.build());
+    Assert.assertNull(
+        openContainerBlockMap.getContainerOpenKeyMap().get(testContainerID));
+    // Make sure the key got committed
+    Assert.assertNotNull(handler.getKeyManager().getKey(container, blockID));
+  }
+}
\ No newline at end of file




[15/50] hadoop git commit: HDDS-269. Refactor IdentifiableEventPayload to use a long ID. Contributed by Ajay Kumar.

Posted by in...@apache.org.
HDDS-269. Refactor IdentifiableEventPayload to use a long ID. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9c44ecf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9c44ecf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9c44ecf

Branch: refs/heads/HADOOP-15461
Commit: e9c44ecfc6ca9d02b2073f16eeadfd7f4a490799
Parents: cbf2026
Author: Nanda kumar <na...@apache.org>
Authored: Fri Jul 20 21:39:45 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Fri Jul 20 21:39:45 2018 +0530

----------------------------------------------------------------------
 .../common/statemachine/StateContext.java       |  4 +-
 .../commandhandler/CommandHandler.java          |  6 +-
 .../commands/CloseContainerCommand.java         |  6 +-
 .../protocol/commands/DeleteBlocksCommand.java  |  6 +-
 .../commands/ReplicateContainerCommand.java     |  6 +-
 .../protocol/commands/ReregisterCommand.java    |  2 +-
 .../ozone/protocol/commands/SCMCommand.java     | 16 ++---
 .../hadoop/hdds/server/events/EventWatcher.java | 47 ++++++++-------
 .../server/events/IdentifiableEventPayload.java |  4 +-
 .../hdds/server/events/TestEventWatcher.java    | 61 +++++++++-----------
 10 files changed, 75 insertions(+), 83 deletions(-)
----------------------------------------------------------------------
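As a rough illustration of the pattern this refactor moves to, the sketch below assumes command payloads expose a long getId() generated from an AtomicLong-backed factory, and an event watcher that keys its tracking map by Long instead of UUID. LongIdFactory, LongKeyedWatcher and Command are hypothetical stand-ins for HddsIdFactory, EventWatcher and SCMCommand, not the actual Hadoop classes.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// Stand-in for an id factory that hands out sequential long ids instead of UUIDs.
final class LongIdFactory {
  private static final AtomicLong NEXT_ID = new AtomicLong(0);
  static long nextId() { return NEXT_ID.incrementAndGet(); }
}

// Mirrors the idea of IdentifiableEventPayload after the refactor: a long id.
interface IdentifiablePayload {
  long getId();
}

// Minimal watcher keyed by Long: track a payload on start, drop it on completion.
class LongKeyedWatcher<T extends IdentifiablePayload> {
  private final Map<Long, T> tracked = new ConcurrentHashMap<>();
  void onStart(T payload) { tracked.put(payload.getId(), payload); }
  T onCompletion(long id) { return tracked.remove(id); }
  int pending() { return tracked.size(); }
}

public class LongIdSketch {
  static final class Command implements IdentifiablePayload {
    private final long id;
    private final String type;
    Command(String type) { this.id = LongIdFactory.nextId(); this.type = type; }
    public long getId() { return id; }
    public String toString() { return type + "#" + id; }
  }

  public static void main(String[] args) {
    LongKeyedWatcher<Command> watcher = new LongKeyedWatcher<>();
    Command cmd = new Command("closeContainer");
    watcher.onStart(cmd);                                   // start event tracked by id
    System.out.println("completed: " + watcher.onCompletion(cmd.getId()));
    System.out.println("pending: " + watcher.pending());    // 0 once matched
  }
}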


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c44ecf/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 7ed30f8..faaff69 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -311,9 +311,9 @@ public class StateContext {
    * @param cmd - {@link SCMCommand}.
    */
   public void addCmdStatus(SCMCommand cmd) {
-    this.addCmdStatus(cmd.getCmdId(),
+    this.addCmdStatus(cmd.getId(),
         CommandStatusBuilder.newBuilder()
-            .setCmdId(cmd.getCmdId())
+            .setCmdId(cmd.getId())
             .setStatus(Status.PENDING)
             .setType(cmd.getType())
             .build());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c44ecf/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java
index 2016419..71c25b5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java
@@ -64,9 +64,9 @@ public interface CommandHandler {
    */
   default void updateCommandStatus(StateContext context, SCMCommand command,
       boolean cmdExecuted, Logger log) {
-    if (!context.updateCommandStatus(command.getCmdId(), cmdExecuted)) {
-      log.debug("{} with cmdId:{} not found.", command.getType(),
-          command.getCmdId());
+    if (!context.updateCommandStatus(command.getId(), cmdExecuted)) {
+      log.debug("{} with Id:{} not found.", command.getType(),
+          command.getId());
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c44ecf/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
index 6b7c22c..1829642 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
@@ -41,8 +41,8 @@ public class CloseContainerCommand
 
   // Should be called only for protobuf conversion
   private CloseContainerCommand(long containerID,
-      HddsProtos.ReplicationType replicationType, long cmdId) {
-    super(cmdId);
+      HddsProtos.ReplicationType replicationType, long id) {
+    super(id);
     this.containerID = containerID;
     this.replicationType = replicationType;
   }
@@ -70,7 +70,7 @@ public class CloseContainerCommand
   public CloseContainerCommandProto getProto() {
     return CloseContainerCommandProto.newBuilder()
         .setContainerID(containerID)
-        .setCmdId(getCmdId())
+        .setCmdId(getId())
         .setReplicationType(replicationType).build();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c44ecf/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java
index 46af794..07feeff 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java
@@ -42,8 +42,8 @@ public class DeleteBlocksCommand extends
 
   // Should be called only for protobuf conversion
   private DeleteBlocksCommand(List<DeletedBlocksTransaction> blocks,
-      long cmdId) {
-    super(cmdId);
+      long id) {
+    super(id);
     this.blocksTobeDeleted = blocks;
   }
 
@@ -69,7 +69,7 @@ public class DeleteBlocksCommand extends
 
   public DeleteBlocksCommandProto getProto() {
     return DeleteBlocksCommandProto.newBuilder()
-        .setCmdId(getCmdId())
+        .setCmdId(getId())
         .addAllDeletedBlocksTransactions(blocksTobeDeleted).build();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c44ecf/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
index e860c93..8530285 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
@@ -51,8 +51,8 @@ public class ReplicateContainerCommand
 
   // Should be called only for protobuf conversion
   public ReplicateContainerCommand(long containerID,
-      List<DatanodeDetails> sourceDatanodes, long cmdId) {
-    super(cmdId);
+      List<DatanodeDetails> sourceDatanodes, long id) {
+    super(id);
     this.containerID = containerID;
     this.sourceDatanodes = sourceDatanodes;
   }
@@ -69,7 +69,7 @@ public class ReplicateContainerCommand
 
   public ReplicateContainerCommandProto getProto() {
     Builder builder = ReplicateContainerCommandProto.newBuilder()
-        .setCmdId(getCmdId())
+        .setCmdId(getId())
         .setContainerID(containerID);
     for (DatanodeDetails dd : sourceDatanodes) {
       builder.addSources(dd.getProtoBufMessage());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c44ecf/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
index d557104..09f361d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
@@ -55,7 +55,7 @@ public class ReregisterCommand extends
    * @return cmdId.
    */
   @Override
-  public long getCmdId() {
+  public long getId() {
     return 0;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c44ecf/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
index 6cda591..5773bf1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
@@ -21,21 +21,23 @@ import com.google.protobuf.GeneratedMessage;
 import org.apache.hadoop.hdds.HddsIdFactory;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload;
 
 /**
  * A class that acts as the base class to convert between Java and SCM
  * commands in protobuf format.
  * @param <T>
  */
-public abstract class SCMCommand<T extends GeneratedMessage> {
-  private long cmdId;
+public abstract class SCMCommand<T extends GeneratedMessage> implements
+    IdentifiableEventPayload {
+  private long id;
 
   SCMCommand() {
-    this.cmdId = HddsIdFactory.getLongId();
+    this.id = HddsIdFactory.getLongId();
   }
 
-  SCMCommand(long cmdId) {
-    this.cmdId = cmdId;
+  SCMCommand(long id) {
+    this.id = id;
   }
   /**
    * Returns the type of this command.
@@ -53,8 +55,8 @@ public abstract class SCMCommand<T extends GeneratedMessage> {
    * Gets the commandId of this object.
    * @return uuid.
    */
-  public long getCmdId() {
-    return cmdId;
+  public long getId() {
+    return id;
   }
 
 }

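For reference, after this rename every SCMCommand carries a plain long identifier handed out by HddsIdFactory, and that identifier is still written into the cmdId field of the protobuf message. A minimal sketch using the three-argument ReplicateContainerCommand constructor shown above (it is meant for protobuf conversion and is used here only to keep the sketch short; the wrapper class name is illustrative, not part of this patch):

import java.util.Collections;

import org.apache.hadoop.hdds.HddsIdFactory;
import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;

public final class CommandIdSketch {
  public static void main(String[] args) {
    // Command ids are plain longs from HddsIdFactory instead of UUIDs.
    long id = HddsIdFactory.getLongId();
    ReplicateContainerCommand cmd =
        new ReplicateContainerCommand(1L, Collections.emptyList(), id);
    // getId() replaces getCmdId(); the protobuf field keeps the cmdId name.
    assert cmd.getId() == id;
    assert cmd.getProto().getCmdId() == id;
  }
}
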
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c44ecf/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
index 8c5605a..473c152 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
@@ -21,7 +21,6 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
@@ -61,22 +60,22 @@ public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
 
   private final Event<COMPLETION_PAYLOAD> completionEvent;
 
-  private final LeaseManager<UUID> leaseManager;
+  private final LeaseManager<Long> leaseManager;
 
   private final EventWatcherMetrics metrics;
 
   private final String name;
 
-  protected final Map<UUID, TIMEOUT_PAYLOAD> trackedEventsByUUID =
+  protected final Map<Long, TIMEOUT_PAYLOAD> trackedEventsByID =
       new ConcurrentHashMap<>();
 
   protected final Set<TIMEOUT_PAYLOAD> trackedEvents = new HashSet<>();
 
-  private final Map<UUID, Long> startTrackingTimes = new HashedMap();
+  private final Map<Long, Long> startTrackingTimes = new HashedMap();
 
   public EventWatcher(String name, Event<TIMEOUT_PAYLOAD> startEvent,
       Event<COMPLETION_PAYLOAD> completionEvent,
-      LeaseManager<UUID> leaseManager) {
+      LeaseManager<Long> leaseManager) {
     this.startEvent = startEvent;
     this.completionEvent = completionEvent;
     this.leaseManager = leaseManager;
@@ -94,7 +93,7 @@ public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
 
   public EventWatcher(Event<TIMEOUT_PAYLOAD> startEvent,
       Event<COMPLETION_PAYLOAD> completionEvent,
-      LeaseManager<UUID> leaseManager) {
+      LeaseManager<Long> leaseManager) {
     this("", startEvent, completionEvent, leaseManager);
   }
 
@@ -103,13 +102,13 @@ public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
     queue.addHandler(startEvent, this::handleStartEvent);
 
     queue.addHandler(completionEvent, (completionPayload, publisher) -> {
-      UUID uuid = completionPayload.getUUID();
+      long id = completionPayload.getId();
       try {
-        handleCompletion(uuid, publisher);
+        handleCompletion(id, publisher);
       } catch (LeaseNotFoundException e) {
         //It's already done. Too late, we already retried it.
         //Not a real problem.
-        LOG.warn("Completion event without active lease. UUID={}", uuid);
+        LOG.warn("Completion event without active lease. Id={}", id);
       }
     });
 
@@ -120,13 +119,13 @@ public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
   private synchronized void handleStartEvent(TIMEOUT_PAYLOAD payload,
       EventPublisher publisher) {
     metrics.incrementTrackedEvents();
-    UUID identifier = payload.getUUID();
+    long identifier = payload.getId();
     startTrackingTimes.put(identifier, System.currentTimeMillis());
 
-    trackedEventsByUUID.put(identifier, payload);
+    trackedEventsByID.put(identifier, payload);
     trackedEvents.add(payload);
     try {
-      Lease<UUID> lease = leaseManager.acquire(identifier);
+      Lease<Long> lease = leaseManager.acquire(identifier);
       try {
         lease.registerCallBack(() -> {
           handleTimeout(publisher, identifier);
@@ -141,23 +140,23 @@ public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
     }
   }
 
-  private synchronized void handleCompletion(UUID uuid,
+  private synchronized void handleCompletion(long id,
       EventPublisher publisher) throws LeaseNotFoundException {
     metrics.incrementCompletedEvents();
-    leaseManager.release(uuid);
-    TIMEOUT_PAYLOAD payload = trackedEventsByUUID.remove(uuid);
+    leaseManager.release(id);
+    TIMEOUT_PAYLOAD payload = trackedEventsByID.remove(id);
     trackedEvents.remove(payload);
-    long originalTime = startTrackingTimes.remove(uuid);
+    long originalTime = startTrackingTimes.remove(id);
     metrics.updateFinishingTime(System.currentTimeMillis() - originalTime);
     onFinished(publisher, payload);
   }
 
   private synchronized void handleTimeout(EventPublisher publisher,
-      UUID identifier) {
+      long identifier) {
     metrics.incrementTimedOutEvents();
-    TIMEOUT_PAYLOAD payload = trackedEventsByUUID.remove(identifier);
+    TIMEOUT_PAYLOAD payload = trackedEventsByID.remove(identifier);
     trackedEvents.remove(payload);
-    startTrackingTimes.remove(payload.getUUID());
+    startTrackingTimes.remove(payload.getId());
     onTimeout(publisher, payload);
   }
 
@@ -171,12 +170,12 @@ public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
 
   public synchronized boolean remove(TIMEOUT_PAYLOAD payload) {
     try {
-      leaseManager.release(payload.getUUID());
+      leaseManager.release(payload.getId());
     } catch (LeaseNotFoundException e) {
-      LOG.warn("Completion event without active lease. UUID={}",
-          payload.getUUID());
+      LOG.warn("Completion event without active lease. Id={}",
+          payload.getId());
     }
-    trackedEventsByUUID.remove(payload.getUUID());
+    trackedEventsByID.remove(payload.getId());
     return trackedEvents.remove(payload);
 
   }
@@ -187,7 +186,7 @@ public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
 
   public List<TIMEOUT_PAYLOAD> getTimeoutEvents(
       Predicate<? super TIMEOUT_PAYLOAD> predicate) {
-    return trackedEventsByUUID.values().stream().filter(predicate)
+    return trackedEventsByID.values().stream().filter(predicate)
         .collect(Collectors.toList());
   }
 

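With the switch to long identifiers, everything the watcher tracks, namely leases, start times and the payload map, is keyed by the payload's getId(). A condensed sketch of that bookkeeping outside the watcher; the acquire/release/registerCallBack calls mirror the diff above, while the LeaseManager constructor argument and the class names are assumptions for illustration:

import org.apache.hadoop.hdds.HddsIdFactory;
import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload;
import org.apache.hadoop.ozone.lease.Lease;
import org.apache.hadoop.ozone.lease.LeaseManager;

public final class WatcherIdSketch {
  // Minimal payload: anything the watcher tracks only needs a long id now.
  static final class RetryablePayload implements IdentifiableEventPayload {
    private final long id = HddsIdFactory.getLongId();
    @Override
    public long getId() {
      return id;
    }
  }

  public static void main(String[] args) throws Exception {
    // Single-argument constructor (timeout in ms) assumed here.
    LeaseManager<Long> leaseManager = new LeaseManager<>(2000L);
    leaseManager.start();

    RetryablePayload payload = new RetryablePayload();
    // handleStartEvent(): acquire a lease keyed by the payload id ...
    Lease<Long> lease = leaseManager.acquire(payload.getId());
    lease.registerCallBack(() -> {
      System.out.println("timed out: " + payload.getId());
      return null;
    });
    // handleCompletion(): ... and release it by the same id once done.
    leaseManager.release(payload.getId());
    leaseManager.shutdown();
  }
}
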
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c44ecf/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java
index e73e30f..3faa8e7 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java
@@ -17,14 +17,12 @@
  */
 package org.apache.hadoop.hdds.server.events;
 
-import java.util.UUID;
-
 /**
  * Event with an additional unique identifier.
  *
  */
 public interface IdentifiableEventPayload {
 
-  UUID getUUID();
+  long getId();
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c44ecf/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
index 38e1554..8f18478 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
@@ -19,15 +19,9 @@ package org.apache.hadoop.hdds.server.events;
 
 import java.util.List;
 import java.util.Objects;
-import java.util.UUID;
-
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.hdds.HddsIdFactory;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.lease.LeaseManager;
-import org.apache.hadoop.test.MetricsAsserts;
-
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -47,7 +41,7 @@ public class TestEventWatcher {
   private static final TypedEvent<ReplicationCompletedEvent>
       REPLICATION_COMPLETED = new TypedEvent<>(ReplicationCompletedEvent.class);
 
-  LeaseManager<UUID> leaseManager;
+  LeaseManager<Long> leaseManager;
 
   @Before
   public void startLeaseManager() {
@@ -77,21 +71,21 @@ public class TestEventWatcher {
 
     replicationWatcher.start(queue);
 
-    UUID uuid1 = UUID.randomUUID();
-    UUID uuid2 = UUID.randomUUID();
+    long id1 = HddsIdFactory.getLongId();
+    long id2 = HddsIdFactory.getLongId();
 
     queue.fireEvent(WATCH_UNDER_REPLICATED,
-        new UnderreplicatedEvent(uuid1, "C1"));
+        new UnderreplicatedEvent(id1, "C1"));
 
     queue.fireEvent(WATCH_UNDER_REPLICATED,
-        new UnderreplicatedEvent(uuid2, "C2"));
+        new UnderreplicatedEvent(id2, "C2"));
 
     Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size());
 
     Thread.sleep(1000);
 
     queue.fireEvent(REPLICATION_COMPLETED,
-        new ReplicationCompletedEvent(uuid1, "C2", "D1"));
+        new ReplicationCompletedEvent(id1, "C2", "D1"));
 
     Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size());
 
@@ -100,8 +94,8 @@ public class TestEventWatcher {
     queue.processAll(1000L);
 
     Assert.assertEquals(1, underReplicatedEvents.getReceivedEvents().size());
-    Assert.assertEquals(uuid2,
-        underReplicatedEvents.getReceivedEvents().get(0).UUID);
+    Assert.assertEquals(id2,
+        underReplicatedEvents.getReceivedEvents().get(0).id);
 
   }
 
@@ -121,15 +115,15 @@ public class TestEventWatcher {
     replicationWatcher.start(queue);
 
     UnderreplicatedEvent event1 =
-        new UnderreplicatedEvent(UUID.randomUUID(), "C1");
+        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1");
 
     queue.fireEvent(WATCH_UNDER_REPLICATED, event1);
 
     queue.fireEvent(WATCH_UNDER_REPLICATED,
-        new UnderreplicatedEvent(UUID.randomUUID(), "C2"));
+        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C2"));
 
     queue.fireEvent(WATCH_UNDER_REPLICATED,
-        new UnderreplicatedEvent(UUID.randomUUID(), "C1"));
+        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1"));
 
     queue.processAll(1000L);
     Thread.sleep(1000L);
@@ -166,13 +160,13 @@ public class TestEventWatcher {
 
     //send 3 event to track 3 in-progress activity
     UnderreplicatedEvent event1 =
-        new UnderreplicatedEvent(UUID.randomUUID(), "C1");
+        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1");
 
     UnderreplicatedEvent event2 =
-        new UnderreplicatedEvent(UUID.randomUUID(), "C2");
+        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C2");
 
     UnderreplicatedEvent event3 =
-        new UnderreplicatedEvent(UUID.randomUUID(), "C1");
+        new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1");
 
     queue.fireEvent(WATCH_UNDER_REPLICATED, event1);
 
@@ -182,11 +176,10 @@ public class TestEventWatcher {
 
     //1st event is completed, don't need to track any more
     ReplicationCompletedEvent event1Completed =
-        new ReplicationCompletedEvent(event1.UUID, "C1", "D1");
+        new ReplicationCompletedEvent(event1.id, "C1", "D1");
 
     queue.fireEvent(REPLICATION_COMPLETED, event1Completed);
 
-
     Thread.sleep(2200l);
 
     //until now: 3 in-progress activities are tracked with three
@@ -218,7 +211,7 @@ public class TestEventWatcher {
 
     public CommandWatcherExample(Event<UnderreplicatedEvent> startEvent,
         Event<ReplicationCompletedEvent> completionEvent,
-        LeaseManager<UUID> leaseManager) {
+        LeaseManager<Long> leaseManager) {
       super("TestCommandWatcher", startEvent, completionEvent, leaseManager);
     }
 
@@ -243,21 +236,21 @@ public class TestEventWatcher {
   private static class ReplicationCompletedEvent
       implements IdentifiableEventPayload {
 
-    private final UUID UUID;
+    private final long id;
 
     private final String containerId;
 
     private final String datanodeId;
 
-    public ReplicationCompletedEvent(UUID UUID, String containerId,
+    public ReplicationCompletedEvent(long id, String containerId,
         String datanodeId) {
-      this.UUID = UUID;
+      this.id = id;
       this.containerId = containerId;
       this.datanodeId = datanodeId;
     }
 
-    public UUID getUUID() {
-      return UUID;
+    public long getId() {
+      return id;
     }
 
     @Override
@@ -284,17 +277,17 @@ public class TestEventWatcher {
 
       implements IdentifiableEventPayload {
 
-    private final UUID UUID;
+    private final long id;
 
     private final String containerId;
 
-    public UnderreplicatedEvent(UUID UUID, String containerId) {
+    public UnderreplicatedEvent(long id, String containerId) {
       this.containerId = containerId;
-      this.UUID = UUID;
+      this.id = id;
     }
 
-    public UUID getUUID() {
-      return UUID;
+    public long getId() {
+      return id;
     }
   }
 




[22/50] hadoop git commit: HDDS-239. Add PipelineStateManager to track pipeline state transition. Contributed by Mukul Kumar Singh.

Posted by in...@apache.org.
HDDS-239. Add PipelineStateManager to track pipeline state transition. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6837121a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6837121a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6837121a

Branch: refs/heads/HADOOP-15461
Commit: 6837121a43231f854b0b22ad20330012439313ce
Parents: ba25d27
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri Jul 20 13:03:25 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Fri Jul 20 13:03:35 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   5 +
 .../scm/container/common/helpers/Pipeline.java  |   7 +
 .../common/src/main/resources/ozone-default.xml |  12 +
 .../common/statemachine/StateContext.java       |  52 +++-
 .../states/endpoint/HeartbeatEndpointTask.java  |  24 +-
 .../StorageContainerDatanodeProtocol.proto      |   4 +-
 .../common/report/TestReportPublisher.java      |  41 ---
 .../endpoint/TestHeartbeatEndpointTask.java     | 302 +++++++++++++++++++
 .../common/states/endpoint/package-info.java    |  18 ++
 .../hdds/scm/container/ContainerMapping.java    |   4 +
 .../hdds/scm/exceptions/SCMException.java       |   1 +
 .../hdds/scm/pipelines/PipelineManager.java     |  64 ++--
 .../hdds/scm/pipelines/PipelineSelector.java    | 212 +++++++++++--
 .../scm/pipelines/ratis/RatisManagerImpl.java   |  33 +-
 .../standalone/StandaloneManagerImpl.java       |  21 +-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |  14 +
 16 files changed, 668 insertions(+), 146 deletions(-)
----------------------------------------------------------------------
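
At its core the change wires a lease-guarded lifecycle state machine into PipelineSelector: CREATE takes a pipeline from ALLOCATED to CREATING, CREATED takes it to OPEN, FINALIZE and CLOSE shut it down, and a creation-lease TIMEOUT pushes a stuck CREATING pipeline straight to CLOSED. A condensed sketch of that wiring, using the StateMachine API exercised in the diff below (the wrapper class name is illustrative):

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.ozone.common.statemachine.StateMachine;

public final class PipelineLifecycleSketch {

  static StateMachine<LifeCycleState, LifeCycleEvent> build() {
    // OPEN and CLOSED are the steady states of a pipeline.
    Set<LifeCycleState> finalStates = new HashSet<>();
    finalStates.add(LifeCycleState.OPEN);
    finalStates.add(LifeCycleState.CLOSED);

    StateMachine<LifeCycleState, LifeCycleEvent> sm =
        new StateMachine<>(LifeCycleState.ALLOCATED, finalStates);
    sm.addTransition(LifeCycleState.ALLOCATED, LifeCycleState.CREATING,
        LifeCycleEvent.CREATE);
    sm.addTransition(LifeCycleState.CREATING, LifeCycleState.OPEN,
        LifeCycleEvent.CREATED);
    sm.addTransition(LifeCycleState.OPEN, LifeCycleState.CLOSING,
        LifeCycleEvent.FINALIZE);
    sm.addTransition(LifeCycleState.CLOSING, LifeCycleState.CLOSED,
        LifeCycleEvent.CLOSE);
    // The creation lease expiring while still CREATING closes the pipeline.
    sm.addTransition(LifeCycleState.CREATING, LifeCycleState.CLOSED,
        LifeCycleEvent.TIMEOUT);
    return sm;
  }
}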


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 71184cf..6e940ad 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -236,6 +236,11 @@ public final class ScmConfigKeys {
   public static final String
       OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
 
+  public static final String OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT =
+      "ozone.scm.pipeline.creation.lease.timeout";
+
+  public static final String
+      OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
 
   public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
       "ozone.scm.block.deletion.max.retry";

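The new key is read with Configuration#getTimeDuration, so any duration suffix (ns, ms, s, m, h, d) is accepted and the default is 60s. A minimal sketch of the read path, mirroring the PipelineSelector constructor later in this commit (the class name is illustrative):

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public final class PipelineLeaseTimeoutSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // "60s" by default; operators may override with any time suffix.
    long timeoutMs = conf.getTimeDuration(
        ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT,
        ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT,
        TimeUnit.MILLISECONDS);
    System.out.println("pipeline creation lease timeout (ms): " + timeoutMs);
  }
}
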
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
index c5794f4..534c9fd 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
@@ -214,6 +214,13 @@ public class Pipeline {
   }
 
   /**
+   * Update the State of the pipeline.
+   */
+  public void setLifeCycleState(HddsProtos.LifeCycleState nextState) {
+     lifeCycleState = nextState;
+  }
+
+  /**
    * Gets the pipeline Name.
    *
    * @return - Name of the pipeline

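The new setter is what lets SCM drive a pipeline through the lifecycle. A small sketch combining it with the state machine added later in this commit; the class and method names here are illustrative, and StateMachine#getNextState and Pipeline#getLifeCycleState are assumed from the existing APIs rather than shown in this diff:

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
import org.apache.hadoop.ozone.common.statemachine.StateMachine;

public final class PipelineStateUpdateSketch {

  private final StateMachine<LifeCycleState, LifeCycleEvent> stateMachine;

  PipelineStateUpdateSketch(
      StateMachine<LifeCycleState, LifeCycleEvent> stateMachine) {
    this.stateMachine = stateMachine;
  }

  // Apply a lifecycle event (CREATE, CREATED, TIMEOUT, ...) to a pipeline.
  void updateState(Pipeline pipeline, LifeCycleEvent event)
      throws InvalidStateTransitionException {
    LifeCycleState next =
        stateMachine.getNextState(pipeline.getLifeCycleState(), event);
    pipeline.setLifeCycleState(next);
  }
}
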
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 5a1d26a..69a382a 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1085,5 +1085,17 @@
       executed since last report. Unit could be defined with
       postfix (ns,ms,s,m,h,d)</description>
   </property>
+  <property>
+    <name>ozone.scm.pipeline.creation.lease.timeout</name>
+    <value>60s</value>
+    <tag>OZONE, SCM, PIPELINE</tag>
+    <description>
+      Pipeline creation timeout used by SCM; the unit can be given with a
+      postfix (ns, ms, s, m, h, d). When a BEGIN_CREATE event happens, the
+      pipeline moves from the ALLOCATED to the CREATING state, and SCM waits
+      for the configured amount of time for a COMPLETE_CREATE event; if that
+      event is not received, the pipeline is moved to DELETING.
+    </description>
+  </property>
 
 </configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index faaff69..4951f2a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -20,14 +20,18 @@ import com.google.protobuf.GeneratedMessage;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
 import org.apache.hadoop.ozone.container.common.states.DatanodeState;
 import org.apache.hadoop.ozone.container.common.states.datanode
     .InitDatanodeState;
 import org.apache.hadoop.ozone.container.common.states.datanode
     .RunningDatanodeState;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
-import org.apache.hadoop.ozone.protocol.commands.CommandStatus.CommandStatusBuilder;
+import org.apache.hadoop.ozone.protocol.commands.CommandStatus
+    .CommandStatusBuilder;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -43,6 +47,7 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.stream.Collectors;
 
 import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT;
 
@@ -59,6 +64,7 @@ public class StateContext {
   private final AtomicLong stateExecutionCount;
   private final Configuration conf;
   private final Queue<GeneratedMessage> reports;
+  private final Queue<ContainerAction> containerActions;
   private DatanodeStateMachine.DatanodeStates state;
 
   /**
@@ -76,6 +82,7 @@ public class StateContext {
     commandQueue = new LinkedList<>();
     cmdStatusMap = new ConcurrentHashMap<>();
     reports = new LinkedList<>();
+    containerActions = new LinkedList<>();
     lock = new ReentrantLock();
     stateExecutionCount = new AtomicLong(0);
   }
@@ -198,6 +205,47 @@ public class StateContext {
     return results;
   }
 
+
+  /**
+   * Adds the ContainerAction to ContainerAction queue.
+   *
+   * @param containerAction ContainerAction to be added
+   */
+  public void addContainerAction(ContainerAction containerAction) {
+    synchronized (containerActions) {
+      containerActions.add(containerAction);
+    }
+  }
+
+  /**
+   * Returns all the pending ContainerActions from the ContainerAction queue,
+   * or empty list if the queue is empty.
+   *
+   * @return List<ContainerAction>
+   */
+  public List<ContainerAction> getAllPendingContainerActions() {
+    return getPendingContainerAction(Integer.MAX_VALUE);
+  }
+
+  /**
+   * Returns pending ContainerActions from the ContainerAction queue with a
+   * max limit on list size, or empty list if the queue is empty.
+   *
+   * @return List<ContainerAction>
+   */
+  public List<ContainerAction> getPendingContainerAction(int maxLimit) {
+    List<ContainerAction> results = new ArrayList<>();
+    synchronized (containerActions) {
+      // Poll up to maxLimit actions from the head of the queue.
+      ContainerAction action = containerActions.poll();
+      while(results.size() < maxLimit && action != null) {
+        results.add(action);
+        action = containerActions.poll();
+      }
+    }
+    return results;
+  }
+
   /**
    * Returns the next task to get executed by the datanode state machine.
    * @return A callable that will be executed by the

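The queue is drained with an optional upper bound per heartbeat. A small usage sketch; the action built here mirrors getContainerAction in the new test further down, and the null parent passed to StateContext is an assumption made only to keep the sketch short (the test uses a Mockito mock instead):

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine.DatanodeStates;
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;

public final class ContainerActionQueueSketch {
  public static void main(String[] args) {
    Configuration conf = new OzoneConfiguration();
    // Null parent keeps the sketch short; the constructor only stores it.
    StateContext context = new StateContext(conf, DatanodeStates.RUNNING, null);

    // A datanode component flags a full container for closing ...
    ContainerAction action = ContainerAction.newBuilder()
        .setContainer(ContainerInfo.newBuilder().setContainerID(1L).build())
        .setAction(ContainerAction.Action.CLOSE)
        .setReason(ContainerAction.Reason.CONTAINER_FULL)
        .build();
    context.addContainerAction(action);

    // ... and the heartbeat task later drains the queue.
    List<ContainerAction> pending = context.getAllPendingContainerActions();
    System.out.println("pending container actions: " + pending.size());
  }
}
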
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index 260a245..214e1cd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -25,6 +25,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerActionsProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
@@ -46,6 +50,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.time.ZonedDateTime;
+import java.util.List;
 import java.util.concurrent.Callable;
 
 /**
@@ -107,7 +112,7 @@ public class HeartbeatEndpointTask
           SCMHeartbeatRequestProto.newBuilder()
               .setDatanodeDetails(datanodeDetailsProto);
       addReports(requestBuilder);
-
+      addContainerActions(requestBuilder);
       SCMHeartbeatResponseProto reponse = rpcEndpoint.getEndPoint()
           .sendHeartbeat(requestBuilder.build());
       processResponse(reponse, datanodeDetailsProto);
@@ -140,6 +145,23 @@ public class HeartbeatEndpointTask
   }
 
   /**
+   * Adds all the pending ContainerActions to the heartbeat.
+   *
+   * @param requestBuilder builder to which the container actions have to be added.
+   */
+  private void addContainerActions(
+      SCMHeartbeatRequestProto.Builder requestBuilder) {
+    List<ContainerAction> actions = context.getAllPendingContainerActions();
+    if (!actions.isEmpty()) {
+      ContainerActionsProto cap = ContainerActionsProto.newBuilder()
+          .addAllContainerActions(actions)
+          .build();
+      requestBuilder.setContainerActions(cap);
+    }
+  }
+
+
+  /**
    * Returns a builder class for HeartbeatEndpointTask task.
    * @return   Builder.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index 4238389..d89567b 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -79,8 +79,8 @@ message SCMHeartbeatRequestProto {
   required DatanodeDetailsProto datanodeDetails = 1;
   optional NodeReportProto nodeReport = 2;
   optional ContainerReportsProto containerReport = 3;
-  optional ContainerActionsProto containerActions = 4;
-  optional CommandStatusReportsProto commandStatusReport = 5;
+  optional CommandStatusReportsProto commandStatusReport = 4;
+  optional ContainerActionsProto containerActions = 5;
 }
 
 /*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
index a0db2e8..811599f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.container.common.report;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.Descriptors;
 import com.google.protobuf.GeneratedMessage;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -28,14 +27,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
@@ -178,22 +171,6 @@ public class TestReportPublisher {
     executorService.shutdown();
   }
 
-  @Test
-  public void testAddingReportToHeartbeat() {
-    GeneratedMessage nodeReport = NodeReportProto.getDefaultInstance();
-    GeneratedMessage containerReport = ContainerReportsProto
-        .getDefaultInstance();
-    SCMHeartbeatRequestProto.Builder heartbeatBuilder =
-        SCMHeartbeatRequestProto.newBuilder();
-    heartbeatBuilder.setDatanodeDetails(
-        getDatanodeDetails().getProtoBufMessage());
-    addReport(heartbeatBuilder, nodeReport);
-    addReport(heartbeatBuilder, containerReport);
-    SCMHeartbeatRequestProto heartbeat = heartbeatBuilder.build();
-    Assert.assertTrue(heartbeat.hasNodeReport());
-    Assert.assertTrue(heartbeat.hasContainerReport());
-  }
-
   /**
    * Get a datanode details.
    *
@@ -222,22 +199,4 @@ public class TestReportPublisher {
     return builder.build();
   }
 
-  /**
-   * Adds the report to heartbeat.
-   *
-   * @param requestBuilder builder to which the report has to be added.
-   * @param report         the report to be added.
-   */
-  private static void addReport(SCMHeartbeatRequestProto.Builder
-      requestBuilder, GeneratedMessage report) {
-    String reportName = report.getDescriptorForType().getFullName();
-    for (Descriptors.FieldDescriptor descriptor :
-        SCMHeartbeatRequestProto.getDescriptor().getFields()) {
-      String heartbeatFieldName = descriptor.getMessageType().getFullName();
-      if (heartbeatFieldName.equals(reportName)) {
-        requestBuilder.setField(descriptor, report);
-      }
-    }
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
new file mode 100644
index 0000000..87bd811
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
@@ -0,0 +1,302 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.states.endpoint;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
+import org.apache.hadoop.ozone.container.common.statemachine
+    .DatanodeStateMachine;
+import org.apache.hadoop.ozone.container.common.statemachine
+    .DatanodeStateMachine.DatanodeStates;
+import org.apache.hadoop.ozone.container.common.statemachine
+    .EndpointStateMachine;
+import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.protocolPB
+    .StorageContainerDatanodeProtocolClientSideTranslatorPB;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
+
+import java.util.UUID;
+
+import static org.mockito.ArgumentMatchers.any;
+
+/**
+ * This class tests the functionality of HeartbeatEndpointTask.
+ */
+public class TestHeartbeatEndpointTask {
+
+
+  @Test
+  public void testheartbeatWithoutReports() throws Exception {
+    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
+        Mockito.mock(
+            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
+    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
+        .forClass(SCMHeartbeatRequestProto.class);
+    Mockito.when(scm.sendHeartbeat(argument.capture()))
+        .thenAnswer(invocation ->
+            SCMHeartbeatResponseProto.newBuilder()
+                .setDatanodeUUID(
+                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
+                        .getDatanodeDetails().getUuid())
+                .build());
+
+    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(scm);
+    endpointTask.call();
+    SCMHeartbeatRequestProto heartbeat = argument.getValue();
+    Assert.assertTrue(heartbeat.hasDatanodeDetails());
+    Assert.assertFalse(heartbeat.hasNodeReport());
+    Assert.assertFalse(heartbeat.hasContainerReport());
+    Assert.assertFalse(heartbeat.hasCommandStatusReport());
+    Assert.assertFalse(heartbeat.hasContainerActions());
+  }
+
+  @Test
+  public void testheartbeatWithNodeReports() throws Exception {
+    Configuration conf = new OzoneConfiguration();
+    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
+        Mockito.mock(DatanodeStateMachine.class));
+
+    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
+        Mockito.mock(
+            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
+    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
+        .forClass(SCMHeartbeatRequestProto.class);
+    Mockito.when(scm.sendHeartbeat(argument.capture()))
+        .thenAnswer(invocation ->
+            SCMHeartbeatResponseProto.newBuilder()
+                .setDatanodeUUID(
+                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
+                        .getDatanodeDetails().getUuid())
+                .build());
+
+    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
+        conf, context, scm);
+    context.addReport(NodeReportProto.getDefaultInstance());
+    endpointTask.call();
+    SCMHeartbeatRequestProto heartbeat = argument.getValue();
+    Assert.assertTrue(heartbeat.hasDatanodeDetails());
+    Assert.assertTrue(heartbeat.hasNodeReport());
+    Assert.assertFalse(heartbeat.hasContainerReport());
+    Assert.assertFalse(heartbeat.hasCommandStatusReport());
+    Assert.assertFalse(heartbeat.hasContainerActions());
+  }
+
+  @Test
+  public void testheartbeatWithContainerReports() throws Exception {
+    Configuration conf = new OzoneConfiguration();
+    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
+        Mockito.mock(DatanodeStateMachine.class));
+
+    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
+        Mockito.mock(
+            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
+    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
+        .forClass(SCMHeartbeatRequestProto.class);
+    Mockito.when(scm.sendHeartbeat(argument.capture()))
+        .thenAnswer(invocation ->
+            SCMHeartbeatResponseProto.newBuilder()
+                .setDatanodeUUID(
+                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
+                        .getDatanodeDetails().getUuid())
+                .build());
+
+    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
+        conf, context, scm);
+    context.addReport(ContainerReportsProto.getDefaultInstance());
+    endpointTask.call();
+    SCMHeartbeatRequestProto heartbeat = argument.getValue();
+    Assert.assertTrue(heartbeat.hasDatanodeDetails());
+    Assert.assertFalse(heartbeat.hasNodeReport());
+    Assert.assertTrue(heartbeat.hasContainerReport());
+    Assert.assertFalse(heartbeat.hasCommandStatusReport());
+    Assert.assertFalse(heartbeat.hasContainerActions());
+  }
+
+  @Test
+  public void testheartbeatWithCommandStatusReports() throws Exception {
+    Configuration conf = new OzoneConfiguration();
+    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
+        Mockito.mock(DatanodeStateMachine.class));
+
+    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
+        Mockito.mock(
+            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
+    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
+        .forClass(SCMHeartbeatRequestProto.class);
+    Mockito.when(scm.sendHeartbeat(argument.capture()))
+        .thenAnswer(invocation ->
+            SCMHeartbeatResponseProto.newBuilder()
+                .setDatanodeUUID(
+                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
+                        .getDatanodeDetails().getUuid())
+                .build());
+
+    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
+        conf, context, scm);
+    context.addReport(CommandStatusReportsProto.getDefaultInstance());
+    endpointTask.call();
+    SCMHeartbeatRequestProto heartbeat = argument.getValue();
+    Assert.assertTrue(heartbeat.hasDatanodeDetails());
+    Assert.assertFalse(heartbeat.hasNodeReport());
+    Assert.assertFalse(heartbeat.hasContainerReport());
+    Assert.assertTrue(heartbeat.hasCommandStatusReport());
+    Assert.assertFalse(heartbeat.hasContainerActions());
+  }
+
+  @Test
+  public void testheartbeatWithContainerActions() throws Exception {
+    Configuration conf = new OzoneConfiguration();
+    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
+        Mockito.mock(DatanodeStateMachine.class));
+
+    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
+        Mockito.mock(
+            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
+    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
+        .forClass(SCMHeartbeatRequestProto.class);
+    Mockito.when(scm.sendHeartbeat(argument.capture()))
+        .thenAnswer(invocation ->
+            SCMHeartbeatResponseProto.newBuilder()
+                .setDatanodeUUID(
+                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
+                        .getDatanodeDetails().getUuid())
+                .build());
+
+    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
+        conf, context, scm);
+    context.addContainerAction(getContainerAction());
+    endpointTask.call();
+    SCMHeartbeatRequestProto heartbeat = argument.getValue();
+    Assert.assertTrue(heartbeat.hasDatanodeDetails());
+    Assert.assertFalse(heartbeat.hasNodeReport());
+    Assert.assertFalse(heartbeat.hasContainerReport());
+    Assert.assertFalse(heartbeat.hasCommandStatusReport());
+    Assert.assertTrue(heartbeat.hasContainerActions());
+  }
+
+  @Test
+  public void testheartbeatWithAllReports() throws Exception {
+    Configuration conf = new OzoneConfiguration();
+    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
+        Mockito.mock(DatanodeStateMachine.class));
+
+    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
+        Mockito.mock(
+            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
+    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
+        .forClass(SCMHeartbeatRequestProto.class);
+    Mockito.when(scm.sendHeartbeat(argument.capture()))
+        .thenAnswer(invocation ->
+            SCMHeartbeatResponseProto.newBuilder()
+                .setDatanodeUUID(
+                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
+                        .getDatanodeDetails().getUuid())
+                .build());
+
+    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
+        conf, context, scm);
+    context.addReport(NodeReportProto.getDefaultInstance());
+    context.addReport(ContainerReportsProto.getDefaultInstance());
+    context.addReport(CommandStatusReportsProto.getDefaultInstance());
+    context.addContainerAction(getContainerAction());
+    endpointTask.call();
+    SCMHeartbeatRequestProto heartbeat = argument.getValue();
+    Assert.assertTrue(heartbeat.hasDatanodeDetails());
+    Assert.assertTrue(heartbeat.hasNodeReport());
+    Assert.assertTrue(heartbeat.hasContainerReport());
+    Assert.assertTrue(heartbeat.hasCommandStatusReport());
+    Assert.assertTrue(heartbeat.hasContainerActions());
+  }
+
+  /**
+   * Creates HeartbeatEndpointTask for the given StorageContainerManager proxy.
+   *
+   * @param proxy StorageContainerDatanodeProtocolClientSideTranslatorPB
+   *
+   * @return HeartbeatEndpointTask
+   */
+  private HeartbeatEndpointTask getHeartbeatEndpointTask(
+      StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) {
+    Configuration conf = new OzoneConfiguration();
+    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
+        Mockito.mock(DatanodeStateMachine.class));
+    return getHeartbeatEndpointTask(conf, context, proxy);
+
+  }
+
+  /**
+   * Creates HeartbeatEndpointTask with the given conf, context and
+   * StorageContainerManager client side proxy.
+   *
+   * @param conf Configuration
+   * @param context StateContext
+   * @param proxy StorageContainerDatanodeProtocolClientSideTranslatorPB
+   *
+   * @return HeartbeatEndpointTask
+   */
+  private HeartbeatEndpointTask getHeartbeatEndpointTask(
+      Configuration conf,
+      StateContext context,
+      StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) {
+    DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder()
+        .setUuid(UUID.randomUUID().toString())
+        .setHostName("localhost")
+        .setIpAddress("127.0.0.1")
+        .build();
+    EndpointStateMachine endpointStateMachine = Mockito
+        .mock(EndpointStateMachine.class);
+    Mockito.when(endpointStateMachine.getEndPoint()).thenReturn(proxy);
+    return HeartbeatEndpointTask.newBuilder()
+        .setConfig(conf)
+        .setDatanodeDetails(datanodeDetails)
+        .setContext(context)
+        .setEndpointStateMachine(endpointStateMachine)
+        .build();
+  }
+
+  private ContainerAction getContainerAction() {
+    ContainerAction.Builder builder = ContainerAction.newBuilder();
+    ContainerInfo containerInfo = ContainerInfo.newBuilder()
+        .setContainerID(1L)
+        .build();
+    builder.setContainer(containerInfo)
+        .setAction(ContainerAction.Action.CLOSE)
+        .setReason(ContainerAction.Reason.CONTAINER_FULL);
+    return builder.build();
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
new file mode 100644
index 0000000..d120a5c
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common.states.endpoint;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index 26f4d86..f07d22b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -658,6 +658,10 @@ public class ContainerMapping implements Mapping {
     if (containerStore != null) {
       containerStore.close();
     }
+
+    if (pipelineSelector != null) {
+      pipelineSelector.shutdown();
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
index d7d70ef..0085542 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
@@ -107,6 +107,7 @@ public class SCMException extends IOException {
     FAILED_TO_LOAD_OPEN_CONTAINER,
     FAILED_TO_ALLOCATE_CONTAINER,
     FAILED_TO_CHANGE_CONTAINER_STATE,
+    FAILED_TO_CHANGE_PIPELINE_STATE,
     CONTAINER_EXISTS,
     FAILED_TO_FIND_CONTAINER,
     FAILED_TO_FIND_CONTAINER_WITH_SPACE,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
index a041973..77d8211 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
@@ -59,41 +59,16 @@ public abstract class PipelineManager {
    * @return a Pipeline.
    */
   public synchronized final Pipeline getPipeline(
-      ReplicationFactor replicationFactor, ReplicationType replicationType)
-      throws IOException {
-    /**
-     * In the Ozone world, we have a very simple policy.
-     *
-     * 1. Try to create a pipeline if there are enough free nodes.
-     *
-     * 2. This allows all nodes to part of a pipeline quickly.
-     *
-     * 3. if there are not enough free nodes, return pipeline in a
-     * round-robin fashion.
-     *
-     * TODO: Might have to come up with a better algorithm than this.
-     * Create a new placement policy that returns pipelines in round robin
-     * fashion.
-     */
-    Pipeline pipeline = allocatePipeline(replicationFactor);
+      ReplicationFactor replicationFactor, ReplicationType replicationType) {
+    Pipeline pipeline = findOpenPipeline(replicationType, replicationFactor);
     if (pipeline != null) {
-      LOG.debug("created new pipeline:{} for container with " +
+      LOG.debug("re-used pipeline:{} for container with " +
               "replicationType:{} replicationFactor:{}",
           pipeline.getPipelineName(), replicationType, replicationFactor);
-      activePipelines.add(pipeline);
-      activePipelineMap.put(pipeline.getPipelineName(), pipeline);
-      node2PipelineMap.addPipeline(pipeline);
-    } else {
-      pipeline = findOpenPipeline(replicationType, replicationFactor);
-      if (pipeline != null) {
-        LOG.debug("re-used pipeline:{} for container with " +
-                "replicationType:{} replicationFactor:{}",
-            pipeline.getPipelineName(), replicationType, replicationFactor);
-      }
     }
     if (pipeline == null) {
       LOG.error("Get pipeline call failed. We are not able to find" +
-              "free nodes or operational pipeline.");
+              " operational pipeline.");
       return null;
     } else {
       return pipeline;
@@ -109,7 +84,7 @@ public abstract class PipelineManager {
   public synchronized final Pipeline getPipeline(String pipelineName) {
     Pipeline pipeline = null;
 
-    // 1. Check if pipeline channel already exists
+    // 1. Check if pipeline already exists
     if (activePipelineMap.containsKey(pipelineName)) {
       pipeline = activePipelineMap.get(pipelineName);
       LOG.debug("Returning pipeline for pipelineName:{}", pipelineName);
@@ -132,7 +107,13 @@ public abstract class PipelineManager {
   }
 
   public abstract Pipeline allocatePipeline(
-      ReplicationFactor replicationFactor) throws IOException;
+      ReplicationFactor replicationFactor);
+
+  /**
+   * Initialize the pipeline
+   * TODO: move the initialization to Ozone Client later
+   */
+  public abstract void initializePipeline(Pipeline pipeline) throws IOException;
 
   public void removePipeline(Pipeline pipeline) {
     activePipelines.remove(pipeline);
@@ -179,12 +160,23 @@ public abstract class PipelineManager {
   }
 
   /**
-   * Creates a pipeline from a specified set of Nodes.
-   * @param pipelineID - Name of the pipeline
-   * @param datanodes - The list of datanodes that make this pipeline.
+   * Creates a pipeline with a specified replication factor and type.
+   * @param replicationFactor - Replication Factor.
+   * @param replicationType - Replication Type.
    */
-  public abstract void createPipeline(String pipelineID,
-      List<DatanodeDetails> datanodes) throws IOException;
+  public Pipeline createPipeline(ReplicationFactor replicationFactor,
+      ReplicationType replicationType) throws IOException {
+    Pipeline pipeline = allocatePipeline(replicationFactor);
+    if (pipeline != null) {
+      LOG.debug("created new pipeline:{} for container with "
+              + "replicationType:{} replicationFactor:{}",
+          pipeline.getPipelineName(), replicationType, replicationFactor);
+      activePipelines.add(pipeline);
+      activePipelineMap.put(pipeline.getPipelineName(), pipeline);
+      node2PipelineMap.addPipeline(pipeline);
+    }
+    return pipeline;
+  }
 
   /**
    * Close the  pipeline with the given clusterId.

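After this change getPipeline(factor, type) only returns an already open pipeline, while allocation and registration move into createPipeline(factor, type). A hedged sketch of how a caller can combine the two; the fall-back ordering shown is an assumption for illustration, not a quote from the patch, and the class and method names are illustrative:

import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.pipelines.PipelineManager;

public final class PipelineLookupSketch {

  // Reuse an open pipeline when possible, otherwise allocate a fresh one.
  static Pipeline getOrCreate(PipelineManager manager,
      ReplicationFactor factor, ReplicationType type) throws IOException {
    Pipeline pipeline = manager.getPipeline(factor, type);
    if (pipeline == null) {
      // createPipeline() registers the pipeline in the active maps and
      // leaves it in ALLOCATED state until the creation workflow completes.
      pipeline = manager.createPipeline(factor, type);
    }
    return pipeline;
  }
}
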
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
index 2955af5..08710e7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .SCMContainerPlacementRandom;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipelines.ratis.RatisManagerImpl;
 import org.apache.hadoop.hdds.scm.pipelines.standalone.StandaloneManagerImpl;
@@ -33,17 +34,28 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.statemachine
+    .InvalidStateTransitionException;
+import org.apache.hadoop.ozone.common.statemachine.StateMachine;
+import org.apache.hadoop.ozone.lease.Lease;
+import org.apache.hadoop.ozone.lease.LeaseException;
+import org.apache.hadoop.ozone.lease.LeaseManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.UUID;
+import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
+    .FAILED_TO_CHANGE_PIPELINE_STATE;
+
 /**
  * Sends the request to the right pipeline manager.
  */
@@ -57,6 +69,10 @@ public class PipelineSelector {
   private final StandaloneManagerImpl standaloneManager;
   private final long containerSize;
   private final Node2PipelineMap node2PipelineMap;
+  private final LeaseManager<Pipeline> pipelineLeaseManager;
+  private final StateMachine<LifeCycleState,
+      HddsProtos.LifeCycleEvent> stateMachine;
+
   /**
    * Constructs a pipeline Selector.
    *
@@ -77,6 +93,74 @@ public class PipelineSelector {
     this.ratisManager =
         new RatisManagerImpl(this.nodeManager, placementPolicy, containerSize,
             conf, node2PipelineMap);
+    // Initialize the container state machine.
+    Set<HddsProtos.LifeCycleState> finalStates = new HashSet();
+    long pipelineCreationLeaseTimeout = conf.getTimeDuration(
+        ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT,
+        ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT,
+        TimeUnit.MILLISECONDS);
+    LOG.trace("Starting Pipeline Lease Manager.");
+    pipelineLeaseManager = new LeaseManager<>(pipelineCreationLeaseTimeout);
+    pipelineLeaseManager.start();
+
+    // These are the steady states of a container.
+    finalStates.add(HddsProtos.LifeCycleState.OPEN);
+    finalStates.add(HddsProtos.LifeCycleState.CLOSED);
+
+    this.stateMachine = new StateMachine<>(HddsProtos.LifeCycleState.ALLOCATED,
+        finalStates);
+    initializeStateMachine();
+  }
+
+  /**
+   * Event and State Transition Mapping:
+   *
+   * State: ALLOCATED ---------------> CREATING
+   * Event:                CREATE
+   *
+   * State: CREATING  ---------------> OPEN
+   * Event:               CREATED
+   *
+   * State: OPEN      ---------------> CLOSING
+   * Event:               FINALIZE
+   *
+   * State: CLOSING   ---------------> CLOSED
+   * Event:                CLOSE
+   *
+   * State: CREATING  ---------------> CLOSED
+   * Event:               TIMEOUT
+   *
+   *
+   * Container State Flow:
+   *
+   * [ALLOCATED]---->[CREATING]------>[OPEN]-------->[CLOSING]
+   *            (CREATE)     | (CREATED)     (FINALIZE)   |
+   *                         |                            |
+   *                         |                            |
+   *                         |(TIMEOUT)                   |(CLOSE)
+   *                         |                            |
+   *                         +--------> [CLOSED] <--------+
+   */
+  private void initializeStateMachine() {
+    stateMachine.addTransition(HddsProtos.LifeCycleState.ALLOCATED,
+        HddsProtos.LifeCycleState.CREATING,
+        HddsProtos.LifeCycleEvent.CREATE);
+
+    stateMachine.addTransition(HddsProtos.LifeCycleState.CREATING,
+        HddsProtos.LifeCycleState.OPEN,
+        HddsProtos.LifeCycleEvent.CREATED);
+
+    stateMachine.addTransition(HddsProtos.LifeCycleState.OPEN,
+        HddsProtos.LifeCycleState.CLOSING,
+        HddsProtos.LifeCycleEvent.FINALIZE);
+
+    stateMachine.addTransition(HddsProtos.LifeCycleState.CLOSING,
+        HddsProtos.LifeCycleState.CLOSED,
+        HddsProtos.LifeCycleEvent.CLOSE);
+
+    stateMachine.addTransition(HddsProtos.LifeCycleState.CREATING,
+        HddsProtos.LifeCycleState.CLOSED,
+        HddsProtos.LifeCycleEvent.TIMEOUT);
   }
 
   /**
@@ -88,15 +172,14 @@ public class PipelineSelector {
    * @return pipeline corresponding to nodes
    */
   public static Pipeline newPipelineFromNodes(
-      List<DatanodeDetails> nodes, LifeCycleState state,
-      ReplicationType replicationType, ReplicationFactor replicationFactor,
-      String name) {
+      List<DatanodeDetails> nodes, ReplicationType replicationType,
+      ReplicationFactor replicationFactor, String name) {
     Preconditions.checkNotNull(nodes);
     Preconditions.checkArgument(nodes.size() > 0);
     String leaderId = nodes.get(0).getUuidString();
-    Pipeline
-        pipeline = new Pipeline(leaderId, state, replicationType,
-        replicationFactor, name);
+    // A new pipeline always starts in allocated state
+    Pipeline pipeline = new Pipeline(leaderId, LifeCycleState.ALLOCATED,
+        replicationType, replicationFactor, name);
     for (DatanodeDetails node : nodes) {
       pipeline.addMember(node);
     }
@@ -175,8 +258,35 @@ public class PipelineSelector {
     LOG.debug("Getting replication pipeline forReplicationType {} :" +
             " ReplicationFactor {}", replicationType.toString(),
         replicationFactor.toString());
-    return manager.
-        getPipeline(replicationFactor, replicationType);
+
+    /**
+     * In the Ozone world, we have a very simple policy.
+     *
+     * 1. Try to create a pipeline if there are enough free nodes.
+     *
+     * 2. This allows all nodes to part of a pipeline quickly.
+     *
+     * 3. if there are not enough free nodes, return already allocated pipeline
+     * in a round-robin fashion.
+     *
+     * TODO: Might have to come up with a better algorithm than this.
+     * Create a new placement policy that returns pipelines in round robin
+     * fashion.
+     */
+    Pipeline pipeline =
+        manager.createPipeline(replicationFactor, replicationType);
+    if (pipeline == null) {
+      // try to return a pipeline from already allocated pipelines
+      pipeline = manager.getPipeline(replicationFactor, replicationType);
+    } else {
+      // if a new pipeline is created, initialize its state machine
+      updatePipelineState(pipeline,HddsProtos.LifeCycleEvent.CREATE);
+
+      //TODO: move the initialization of pipeline to Ozone Client
+      manager.initializePipeline(pipeline);
+      updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.CREATED);
+    }
+    return pipeline;
   }
 
   /**
@@ -194,19 +304,6 @@ public class PipelineSelector {
         " pipelineName:{}", replicationType, pipelineName);
     return manager.getPipeline(pipelineName);
   }
-  /**
-   * Creates a pipeline from a specified set of Nodes.
-   */
-
-  public void createPipeline(ReplicationType replicationType, String
-      pipelineID, List<DatanodeDetails> datanodes) throws IOException {
-    PipelineManager manager = getPipelineManager(replicationType);
-    Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
-    LOG.debug("Creating a pipeline: {} with nodes:{}", pipelineID,
-        datanodes.stream().map(DatanodeDetails::toString)
-            .collect(Collectors.joining(",")));
-    manager.createPipeline(pipelineID, datanodes);
-  }
 
   /**
    * Close the  pipeline with the given clusterId.
@@ -251,12 +348,77 @@ public class PipelineSelector {
   }
 
   public void removePipeline(UUID dnId) {
-    Set<Pipeline> pipelineChannelSet =
+    Set<Pipeline> pipelineSet =
         node2PipelineMap.getPipelines(dnId);
-    for (Pipeline pipelineChannel : pipelineChannelSet) {
-      getPipelineManager(pipelineChannel.getType())
-          .removePipeline(pipelineChannel);
+    for (Pipeline pipeline : pipelineSet) {
+      getPipelineManager(pipeline.getType())
+          .removePipeline(pipeline);
     }
     node2PipelineMap.removeDatanode(dnId);
   }
+
+  /**
+   * Update the Pipeline State to the next state.
+   *
+   * @param pipeline - Pipeline
+   * @param event - LifeCycle Event
+   * @throws SCMException  on Failure.
+   */
+  public void updatePipelineState(Pipeline pipeline,
+      HddsProtos.LifeCycleEvent event) throws IOException {
+    HddsProtos.LifeCycleState newState;
+    try {
+      newState = stateMachine.getNextState(pipeline.getLifeCycleState(), event);
+    } catch (InvalidStateTransitionException ex) {
+      String error = String.format("Failed to update pipeline state %s, " +
+              "reason: invalid state transition from state: %s upon " +
+              "event: %s.",
+          pipeline.getPipelineName(), pipeline.getLifeCycleState(), event);
+      LOG.error(error);
+      throw new SCMException(error, FAILED_TO_CHANGE_PIPELINE_STATE);
+    }
+
+    // This is a post condition after executing getNextState.
+    Preconditions.checkNotNull(newState);
+    Preconditions.checkNotNull(pipeline);
+    try {
+      switch (event) {
+      case CREATE:
+        // Acquire lease on pipeline
+        Lease<Pipeline> pipelineLease = pipelineLeaseManager.acquire(pipeline);
+        // Register callback to be executed in case of timeout
+        pipelineLease.registerCallBack(() -> {
+          updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.TIMEOUT);
+          return null;
+        });
+        break;
+      case CREATED:
+        // Release the lease on pipeline
+        pipelineLeaseManager.release(pipeline);
+        break;
+
+      case FINALIZE:
+        //TODO: cleanup pipeline by closing all the containers on the pipeline
+        break;
+
+      case CLOSE:
+      case TIMEOUT:
+        // TODO: Release the nodes here when pipelines are destroyed
+        break;
+      default:
+        throw new SCMException("Unsupported pipeline LifeCycleEvent.",
+            FAILED_TO_CHANGE_PIPELINE_STATE);
+      }
+
+      pipeline.setLifeCycleState(newState);
+    } catch (LeaseException e) {
+      throw new IOException("Lease Exception.", e);
+    }
+  }
+
+  public void shutdown() {
+    if (pipelineLeaseManager != null) {
+      pipelineLeaseManager.shutdown();
+    }
+  }
 }
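
Taken together, the changes above mean a pipeline is no longer born OPEN but is
driven through ALLOCATED -> CREATING -> OPEN. A minimal sketch of that flow,
assuming a PipelineManager manager and a PipelineSelector selector wired up as
in ContainerMapping (the helper name openNewPipeline is illustrative only, not
part of the patch):

    static Pipeline openNewPipeline(PipelineManager manager,
        PipelineSelector selector, HddsProtos.ReplicationFactor factor,
        HddsProtos.ReplicationType type) throws IOException {
      // Allocation returns a pipeline in ALLOCATED state, or null when there
      // are not enough free datanodes for a new pipeline.
      Pipeline pipeline = manager.createPipeline(factor, type);
      if (pipeline == null) {
        // Fall back to an already open pipeline, round-robin.
        return manager.getPipeline(factor, type);
      }
      // ALLOCATED -> CREATING; the selector acquires a lease so that a stuck
      // creation is moved to CLOSED via the TIMEOUT callback.
      selector.updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.CREATE);
      // Datanode-side setup, e.g. Ratis group creation in RatisManagerImpl.
      manager.initializePipeline(pipeline);
      // CREATING -> OPEN; the lease is released.
      selector.updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.CREATED);
      return pipeline;
    }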

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
index a8f8b20..c726ef6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hdds.scm.pipelines.Node2PipelineMap;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
@@ -72,7 +71,7 @@ public class RatisManagerImpl extends PipelineManager {
    * Allocates a new ratis Pipeline from the free nodes.
    *
    * @param factor - One or Three
-   * @return PipelineChannel.
+   * @return Pipeline.
    */
   public Pipeline allocatePipeline(ReplicationFactor factor) {
     List<DatanodeDetails> newNodesList = new LinkedList<>();
@@ -89,35 +88,23 @@ public class RatisManagerImpl extends PipelineManager {
           // further allocations
           ratisMembers.addAll(newNodesList);
           LOG.info("Allocating a new ratis pipeline of size: {}", count);
-          // Start all channel names with "Ratis", easy to grep the logs.
+          // Start all pipeline names with "Ratis", easy to grep the logs.
           String pipelineName = PREFIX +
               UUID.randomUUID().toString().substring(PREFIX.length());
-          Pipeline pipeline=
-              PipelineSelector.newPipelineFromNodes(newNodesList,
-              LifeCycleState.OPEN, ReplicationType.RATIS, factor, pipelineName);
-          try (XceiverClientRatis client =
-              XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) {
-            client.createPipeline(pipeline.getPipelineName(), newNodesList);
-          } catch (IOException e) {
-            return null;
-          }
-          return pipeline;
+          return PipelineSelector.newPipelineFromNodes(newNodesList,
+              ReplicationType.RATIS, factor, pipelineName);
         }
       }
     }
     return null;
   }
 
-  /**
-   * Creates a pipeline from a specified set of Nodes.
-   *
-   * @param pipelineID - Name of the pipeline
-   * @param datanodes - The list of datanodes that make this pipeline.
-   */
-  @Override
-  public void createPipeline(String pipelineID,
-                             List<DatanodeDetails> datanodes) {
-
+  public void initializePipeline(Pipeline pipeline) throws IOException {
+    //TODO:move the initialization from SCM to client
+    try (XceiverClientRatis client =
+        XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) {
+      client.createPipeline(pipeline.getPipelineName(), pipeline.getMachines());
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
index cf691bf..bb4951f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.scm.pipelines.Node2PipelineMap;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
@@ -86,29 +85,19 @@ public class StandaloneManagerImpl extends PipelineManager {
           // once a datanode has been added to a pipeline, exclude it from
           // further allocations
           standAloneMembers.addAll(newNodesList);
-          LOG.info("Allocating a new standalone pipeline channel of size: {}",
-              count);
-          String channelName =
+          LOG.info("Allocating a new standalone pipeline of size: {}", count);
+          String pipelineName =
               "SA-" + UUID.randomUUID().toString().substring(3);
           return PipelineSelector.newPipelineFromNodes(newNodesList,
-              LifeCycleState.OPEN, ReplicationType.STAND_ALONE,
-              ReplicationFactor.ONE, channelName);
+              ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineName);
         }
       }
     }
     return null;
   }
 
-  /**
-   * Creates a pipeline from a specified set of Nodes.
-   *
-   * @param pipelineID - Name of the pipeline
-   * @param datanodes - The list of datanodes that make this pipeline.
-   */
-  @Override
-  public void createPipeline(String pipelineID,
-                             List<DatanodeDetails> datanodes) {
-    //return newPipelineFromNodes(datanodes, pipelineID);
+  public void initializePipeline(Pipeline pipeline) {
+    // Nothing to be done for standalone pipeline
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
index bc3505f..ffac6d5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.hdds.scm.container.common.helpers
     .ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.junit.AfterClass;
@@ -51,6 +53,7 @@ public class TestNode2PipelineMap {
   private static ContainerWithPipeline ratisContainer;
   private static ContainerStateMap stateMap;
   private static ContainerMapping mapping;
+  private static PipelineSelector pipelineSelector;
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -66,6 +69,7 @@ public class TestNode2PipelineMap {
     mapping = (ContainerMapping)scm.getScmContainerManager();
     stateMap = mapping.getStateManager().getContainerStateMap();
     ratisContainer = mapping.allocateContainer(RATIS, THREE, "testOwner");
+    pipelineSelector = mapping.getPipelineSelector();
   }
 
   /**
@@ -113,5 +117,15 @@ public class TestNode2PipelineMap {
     NavigableSet<ContainerID> set2 = stateMap.getOpenContainerIDsByPipeline(
         ratisContainer.getPipeline().getPipelineName());
     Assert.assertEquals(0, set2.size());
+
+    try {
+      pipelineSelector.updatePipelineState(ratisContainer.getPipeline(),
+          HddsProtos.LifeCycleEvent.CLOSE);
+      Assert.fail("closing of pipeline without finalize should fail");
+    } catch (Exception e) {
+      Assert.assertTrue(e instanceof SCMException);
+      Assert.assertEquals(((SCMException)e).getResult(),
+          SCMException.ResultCodes.FAILED_TO_CHANGE_PIPELINE_STATE);
+    }
   }
 }
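
The negative test above checks that CLOSE is rejected straight from the OPEN
state. Per the transition table added in PipelineSelector, the sequence the
state machine does accept would be roughly the following sketch (same
pipelineSelector and ratisContainer fixtures, inside a test method that
declares throws IOException):

    // OPEN -> CLOSING
    pipelineSelector.updatePipelineState(ratisContainer.getPipeline(),
        HddsProtos.LifeCycleEvent.FINALIZE);
    // CLOSING -> CLOSED
    pipelineSelector.updatePipelineState(ratisContainer.getPipeline(),
        HddsProtos.LifeCycleEvent.CLOSE);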


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[24/50] hadoop git commit: Revert "HDDS-239. Add PipelineStateManager to track pipeline state transition. Contributed by Mukul Kumar Singh."

Posted by in...@apache.org.
Revert "HDDS-239. Add PipelineStateManager to track pipeline state transition. Contributed by Mukul Kumar Singh."

This reverts commit 6837121a43231f854b0b22ad20330012439313ce. (Mixed with HDDS-260)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2acf8d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2acf8d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2acf8d5

Branch: refs/heads/HADOOP-15461
Commit: d2acf8d560950f06ffbf5c217fbfab76cd70d5da
Parents: c7ae556
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri Jul 20 14:20:18 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Fri Jul 20 14:20:18 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   5 -
 .../scm/container/common/helpers/Pipeline.java  |   7 -
 .../common/src/main/resources/ozone-default.xml |  12 -
 .../common/statemachine/StateContext.java       |  52 +---
 .../states/endpoint/HeartbeatEndpointTask.java  |  24 +-
 .../StorageContainerDatanodeProtocol.proto      |   4 +-
 .../common/report/TestReportPublisher.java      |  41 +++
 .../endpoint/TestHeartbeatEndpointTask.java     | 302 -------------------
 .../common/states/endpoint/package-info.java    |  18 --
 .../hdds/scm/container/ContainerMapping.java    |   4 -
 .../hdds/scm/exceptions/SCMException.java       |   1 -
 .../hdds/scm/pipelines/PipelineManager.java     |  64 ++--
 .../hdds/scm/pipelines/PipelineSelector.java    | 212 ++-----------
 .../scm/pipelines/ratis/RatisManagerImpl.java   |  33 +-
 .../standalone/StandaloneManagerImpl.java       |  21 +-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |  14 -
 16 files changed, 146 insertions(+), 668 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 6e940ad..71184cf 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -236,11 +236,6 @@ public final class ScmConfigKeys {
   public static final String
       OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
 
-  public static final String OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT =
-      "ozone.scm.pipeline.creation.lease.timeout";
-
-  public static final String
-      OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
 
   public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
       "ozone.scm.block.deletion.max.retry";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
index 534c9fd..c5794f4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
@@ -214,13 +214,6 @@ public class Pipeline {
   }
 
   /**
-   * Update the State of the pipeline.
-   */
-  public void setLifeCycleState(HddsProtos.LifeCycleState nextState) {
-     lifeCycleState = nextState;
-  }
-
-  /**
    * Gets the pipeline Name.
    *
    * @return - Name of the pipeline

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 69a382a..5a1d26a 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1085,17 +1085,5 @@
       executed since last report. Unit could be defined with
       postfix (ns,ms,s,m,h,d)</description>
   </property>
-  <property>
-    <name>ozone.scm.pipeline.creation.lease.timeout</name>
-    <value>60s</value>
-    <tag>OZONE, SCM, PIPELINE</tag>
-    <description>
-      Pipeline creation timeout in milliseconds to be used by SCM. When
-      BEGIN_CREATE event happens the pipeline is moved from ALLOCATED to
-      CREATING state, SCM will now wait for the configured amount of time
-      to get COMPLETE_CREATE event if it doesn't receive it will move the
-      pipeline to DELETING.
-    </description>
-  </property>
 
 </configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 4951f2a..faaff69 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -20,18 +20,14 @@ import com.google.protobuf.GeneratedMessage;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
 import org.apache.hadoop.ozone.container.common.states.DatanodeState;
 import org.apache.hadoop.ozone.container.common.states.datanode
     .InitDatanodeState;
 import org.apache.hadoop.ozone.container.common.states.datanode
     .RunningDatanodeState;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
-import org.apache.hadoop.ozone.protocol.commands.CommandStatus
-    .CommandStatusBuilder;
+import org.apache.hadoop.ozone.protocol.commands.CommandStatus.CommandStatusBuilder;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -47,7 +43,6 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
-import java.util.stream.Collectors;
 
 import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT;
 
@@ -64,7 +59,6 @@ public class StateContext {
   private final AtomicLong stateExecutionCount;
   private final Configuration conf;
   private final Queue<GeneratedMessage> reports;
-  private final Queue<ContainerAction> containerActions;
   private DatanodeStateMachine.DatanodeStates state;
 
   /**
@@ -82,7 +76,6 @@ public class StateContext {
     commandQueue = new LinkedList<>();
     cmdStatusMap = new ConcurrentHashMap<>();
     reports = new LinkedList<>();
-    containerActions = new LinkedList<>();
     lock = new ReentrantLock();
     stateExecutionCount = new AtomicLong(0);
   }
@@ -205,47 +198,6 @@ public class StateContext {
     return results;
   }
 
-
-  /**
-   * Adds the ContainerAction to ContainerAction queue.
-   *
-   * @param containerAction ContainerAction to be added
-   */
-  public void addContainerAction(ContainerAction containerAction) {
-    synchronized (containerActions) {
-      containerActions.add(containerAction);
-    }
-  }
-
-  /**
-   * Returns all the pending ContainerActions from the ContainerAction queue,
-   * or empty list if the queue is empty.
-   *
-   * @return List<ContainerAction>
-   */
-  public List<ContainerAction> getAllPendingContainerActions() {
-    return getPendingContainerAction(Integer.MAX_VALUE);
-  }
-
-  /**
-   * Returns pending ContainerActions from the ContainerAction queue with a
-   * max limit on list size, or empty list if the queue is empty.
-   *
-   * @return List<ContainerAction>
-   */
-  public List<ContainerAction> getPendingContainerAction(int maxLimit) {
-    List<ContainerAction> results = new ArrayList<>();
-    synchronized (containerActions) {
-      containerActions.parallelStream().limit(maxLimit).collect(Collectors.toList());
-      ContainerAction action = containerActions.poll();
-      while(results.size() < maxLimit && action != null) {
-        results.add(action);
-        action = containerActions.poll();
-      }
-    }
-    return results;
-  }
-
   /**
    * Returns the next task to get executed by the datanode state machine.
    * @return A callable that will be executed by the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index 214e1cd..260a245 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -25,10 +25,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerActionsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
@@ -50,7 +46,6 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.time.ZonedDateTime;
-import java.util.List;
 import java.util.concurrent.Callable;
 
 /**
@@ -112,7 +107,7 @@ public class HeartbeatEndpointTask
           SCMHeartbeatRequestProto.newBuilder()
               .setDatanodeDetails(datanodeDetailsProto);
       addReports(requestBuilder);
-      addContainerActions(requestBuilder);
+
       SCMHeartbeatResponseProto reponse = rpcEndpoint.getEndPoint()
           .sendHeartbeat(requestBuilder.build());
       processResponse(reponse, datanodeDetailsProto);
@@ -145,23 +140,6 @@ public class HeartbeatEndpointTask
   }
 
   /**
-   * Adds all the pending ContainerActions to the heartbeat.
-   *
-   * @param requestBuilder builder to which the report has to be added.
-   */
-  private void addContainerActions(
-      SCMHeartbeatRequestProto.Builder requestBuilder) {
-    List<ContainerAction> actions = context.getAllPendingContainerActions();
-    if (!actions.isEmpty()) {
-      ContainerActionsProto cap = ContainerActionsProto.newBuilder()
-          .addAllContainerActions(actions)
-          .build();
-      requestBuilder.setContainerActions(cap);
-    }
-  }
-
-
-  /**
    * Returns a builder class for HeartbeatEndpointTask task.
    * @return   Builder.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index d89567b..4238389 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -79,8 +79,8 @@ message SCMHeartbeatRequestProto {
   required DatanodeDetailsProto datanodeDetails = 1;
   optional NodeReportProto nodeReport = 2;
   optional ContainerReportsProto containerReport = 3;
-  optional CommandStatusReportsProto commandStatusReport = 4;
-  optional ContainerActionsProto containerActions = 5;
+  optional ContainerActionsProto containerActions = 4;
+  optional CommandStatusReportsProto commandStatusReport = 5;
 }
 
 /*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
index 811599f..a0db2e8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.common.report;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.protobuf.Descriptors;
 import com.google.protobuf.GeneratedMessage;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -27,8 +28,14 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
@@ -171,6 +178,22 @@ public class TestReportPublisher {
     executorService.shutdown();
   }
 
+  @Test
+  public void testAddingReportToHeartbeat() {
+    GeneratedMessage nodeReport = NodeReportProto.getDefaultInstance();
+    GeneratedMessage containerReport = ContainerReportsProto
+        .getDefaultInstance();
+    SCMHeartbeatRequestProto.Builder heartbeatBuilder =
+        SCMHeartbeatRequestProto.newBuilder();
+    heartbeatBuilder.setDatanodeDetails(
+        getDatanodeDetails().getProtoBufMessage());
+    addReport(heartbeatBuilder, nodeReport);
+    addReport(heartbeatBuilder, containerReport);
+    SCMHeartbeatRequestProto heartbeat = heartbeatBuilder.build();
+    Assert.assertTrue(heartbeat.hasNodeReport());
+    Assert.assertTrue(heartbeat.hasContainerReport());
+  }
+
   /**
    * Get a datanode details.
    *
@@ -199,4 +222,22 @@ public class TestReportPublisher {
     return builder.build();
   }
 
+  /**
+   * Adds the report to heartbeat.
+   *
+   * @param requestBuilder builder to which the report has to be added.
+   * @param report         the report to be added.
+   */
+  private static void addReport(SCMHeartbeatRequestProto.Builder
+      requestBuilder, GeneratedMessage report) {
+    String reportName = report.getDescriptorForType().getFullName();
+    for (Descriptors.FieldDescriptor descriptor :
+        SCMHeartbeatRequestProto.getDescriptor().getFields()) {
+      String heartbeatFieldName = descriptor.getMessageType().getFullName();
+      if (heartbeatFieldName.equals(reportName)) {
+        requestBuilder.setField(descriptor, report);
+      }
+    }
+  }
+
 }
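
The addReport helper above routes a report to its heartbeat field purely by
comparing protobuf full names, so no per-report-type switch is needed and a new
report only requires a new optional field in SCMHeartbeatRequestProto. A small
extension of the same idea, assuming the same builder fixture and an import of
CommandStatusReportsProto:

    // A command status report is matched to the commandStatusReport field the
    // same way node and container reports were matched above.
    addReport(heartbeatBuilder, CommandStatusReportsProto.getDefaultInstance());
    Assert.assertTrue(heartbeatBuilder.build().hasCommandStatusReport());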

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
deleted file mode 100644
index 87bd811..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
+++ /dev/null
@@ -1,302 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.states.endpoint;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine.DatanodeStates;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerDatanodeProtocolClientSideTranslatorPB;
-
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Mockito;
-
-import java.util.UUID;
-
-import static org.mockito.ArgumentMatchers.any;
-
-/**
- * This class tests the functionality of HeartbeatEndpointTask.
- */
-public class TestHeartbeatEndpointTask {
-
-
-  @Test
-  public void testheartbeatWithoutReports() throws Exception {
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(scm);
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertFalse(heartbeat.hasNodeReport());
-    Assert.assertFalse(heartbeat.hasContainerReport());
-    Assert.assertFalse(heartbeat.hasCommandStatusReport());
-    Assert.assertFalse(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testheartbeatWithNodeReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addReport(NodeReportProto.getDefaultInstance());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertTrue(heartbeat.hasNodeReport());
-    Assert.assertFalse(heartbeat.hasContainerReport());
-    Assert.assertFalse(heartbeat.hasCommandStatusReport());
-    Assert.assertFalse(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testheartbeatWithContainerReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addReport(ContainerReportsProto.getDefaultInstance());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertFalse(heartbeat.hasNodeReport());
-    Assert.assertTrue(heartbeat.hasContainerReport());
-    Assert.assertFalse(heartbeat.hasCommandStatusReport());
-    Assert.assertFalse(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testheartbeatWithCommandStatusReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addReport(CommandStatusReportsProto.getDefaultInstance());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertFalse(heartbeat.hasNodeReport());
-    Assert.assertFalse(heartbeat.hasContainerReport());
-    Assert.assertTrue(heartbeat.hasCommandStatusReport());
-    Assert.assertFalse(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testheartbeatWithContainerActions() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addContainerAction(getContainerAction());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertFalse(heartbeat.hasNodeReport());
-    Assert.assertFalse(heartbeat.hasContainerReport());
-    Assert.assertFalse(heartbeat.hasCommandStatusReport());
-    Assert.assertTrue(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testheartbeatWithAllReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addReport(NodeReportProto.getDefaultInstance());
-    context.addReport(ContainerReportsProto.getDefaultInstance());
-    context.addReport(CommandStatusReportsProto.getDefaultInstance());
-    context.addContainerAction(getContainerAction());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertTrue(heartbeat.hasNodeReport());
-    Assert.assertTrue(heartbeat.hasContainerReport());
-    Assert.assertTrue(heartbeat.hasCommandStatusReport());
-    Assert.assertTrue(heartbeat.hasContainerActions());
-  }
-
-  /**
-   * Creates HeartbeatEndpointTask for the given StorageContainerManager proxy.
-   *
-   * @param proxy StorageContainerDatanodeProtocolClientSideTranslatorPB
-   *
-   * @return HeartbeatEndpointTask
-   */
-  private HeartbeatEndpointTask getHeartbeatEndpointTask(
-      StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-    return getHeartbeatEndpointTask(conf, context, proxy);
-
-  }
-
-  /**
-   * Creates HeartbeatEndpointTask with the given conf, context and
-   * StorageContainerManager client side proxy.
-   *
-   * @param conf Configuration
-   * @param context StateContext
-   * @param proxy StorageContainerDatanodeProtocolClientSideTranslatorPB
-   *
-   * @return HeartbeatEndpointTask
-   */
-  private HeartbeatEndpointTask getHeartbeatEndpointTask(
-      Configuration conf,
-      StateContext context,
-      StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) {
-    DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder()
-        .setUuid(UUID.randomUUID().toString())
-        .setHostName("localhost")
-        .setIpAddress("127.0.0.1")
-        .build();
-    EndpointStateMachine endpointStateMachine = Mockito
-        .mock(EndpointStateMachine.class);
-    Mockito.when(endpointStateMachine.getEndPoint()).thenReturn(proxy);
-    return HeartbeatEndpointTask.newBuilder()
-        .setConfig(conf)
-        .setDatanodeDetails(datanodeDetails)
-        .setContext(context)
-        .setEndpointStateMachine(endpointStateMachine)
-        .build();
-  }
-
-  private ContainerAction getContainerAction() {
-    ContainerAction.Builder builder = ContainerAction.newBuilder();
-    ContainerInfo containerInfo = ContainerInfo.newBuilder()
-        .setContainerID(1L)
-        .build();
-    builder.setContainer(containerInfo)
-        .setAction(ContainerAction.Action.CLOSE)
-        .setReason(ContainerAction.Reason.CONTAINER_FULL);
-    return builder.build();
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
deleted file mode 100644
index d120a5c..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.states.endpoint;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index f07d22b..26f4d86 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -658,10 +658,6 @@ public class ContainerMapping implements Mapping {
     if (containerStore != null) {
       containerStore.close();
     }
-
-    if (pipelineSelector != null) {
-      pipelineSelector.shutdown();
-    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
index 0085542..d7d70ef 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
@@ -107,7 +107,6 @@ public class SCMException extends IOException {
     FAILED_TO_LOAD_OPEN_CONTAINER,
     FAILED_TO_ALLOCATE_CONTAINER,
     FAILED_TO_CHANGE_CONTAINER_STATE,
-    FAILED_TO_CHANGE_PIPELINE_STATE,
     CONTAINER_EXISTS,
     FAILED_TO_FIND_CONTAINER,
     FAILED_TO_FIND_CONTAINER_WITH_SPACE,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
index 77d8211..a041973 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
@@ -59,16 +59,41 @@ public abstract class PipelineManager {
    * @return a Pipeline.
    */
   public synchronized final Pipeline getPipeline(
-      ReplicationFactor replicationFactor, ReplicationType replicationType) {
-    Pipeline pipeline = findOpenPipeline(replicationType, replicationFactor);
+      ReplicationFactor replicationFactor, ReplicationType replicationType)
+      throws IOException {
+    /**
+     * In the Ozone world, we have a very simple policy.
+     *
+     * 1. Try to create a pipeline if there are enough free nodes.
+     *
+     * 2. This allows all nodes to part of a pipeline quickly.
+     *
+     * 3. if there are not enough free nodes, return pipeline in a
+     * round-robin fashion.
+     *
+     * TODO: Might have to come up with a better algorithm than this.
+     * Create a new placement policy that returns pipelines in round robin
+     * fashion.
+     */
+    Pipeline pipeline = allocatePipeline(replicationFactor);
     if (pipeline != null) {
-      LOG.debug("re-used pipeline:{} for container with " +
+      LOG.debug("created new pipeline:{} for container with " +
               "replicationType:{} replicationFactor:{}",
           pipeline.getPipelineName(), replicationType, replicationFactor);
+      activePipelines.add(pipeline);
+      activePipelineMap.put(pipeline.getPipelineName(), pipeline);
+      node2PipelineMap.addPipeline(pipeline);
+    } else {
+      pipeline = findOpenPipeline(replicationType, replicationFactor);
+      if (pipeline != null) {
+        LOG.debug("re-used pipeline:{} for container with " +
+                "replicationType:{} replicationFactor:{}",
+            pipeline.getPipelineName(), replicationType, replicationFactor);
+      }
     }
     if (pipeline == null) {
       LOG.error("Get pipeline call failed. We are not able to find" +
-              " operational pipeline.");
+              "free nodes or operational pipeline.");
       return null;
     } else {
       return pipeline;
@@ -84,7 +109,7 @@ public abstract class PipelineManager {
   public synchronized final Pipeline getPipeline(String pipelineName) {
     Pipeline pipeline = null;
 
-    // 1. Check if pipeline already exists
+    // 1. Check if pipeline channel already exists
     if (activePipelineMap.containsKey(pipelineName)) {
       pipeline = activePipelineMap.get(pipelineName);
       LOG.debug("Returning pipeline for pipelineName:{}", pipelineName);
@@ -107,13 +132,7 @@ public abstract class PipelineManager {
   }
 
   public abstract Pipeline allocatePipeline(
-      ReplicationFactor replicationFactor);
-
-  /**
-   * Initialize the pipeline
-   * TODO: move the initialization to Ozone Client later
-   */
-  public abstract void initializePipeline(Pipeline pipeline) throws IOException;
+      ReplicationFactor replicationFactor) throws IOException;
 
   public void removePipeline(Pipeline pipeline) {
     activePipelines.remove(pipeline);
@@ -160,23 +179,12 @@ public abstract class PipelineManager {
   }
 
   /**
-   * Creates a pipeline with a specified replication factor and type.
-   * @param replicationFactor - Replication Factor.
-   * @param replicationType - Replication Type.
+   * Creates a pipeline from a specified set of Nodes.
+   * @param pipelineID - Name of the pipeline
+   * @param datanodes - The list of datanodes that make this pipeline.
    */
-  public Pipeline createPipeline(ReplicationFactor replicationFactor,
-      ReplicationType replicationType) throws IOException {
-    Pipeline pipeline = allocatePipeline(replicationFactor);
-    if (pipeline != null) {
-      LOG.debug("created new pipeline:{} for container with "
-              + "replicationType:{} replicationFactor:{}",
-          pipeline.getPipelineName(), replicationType, replicationFactor);
-      activePipelines.add(pipeline);
-      activePipelineMap.put(pipeline.getPipelineName(), pipeline);
-      node2PipelineMap.addPipeline(pipeline);
-    }
-    return pipeline;
-  }
+  public abstract void createPipeline(String pipelineID,
+      List<DatanodeDetails> datanodes) throws IOException;
 
   /**
    * Close the  pipeline with the given clusterId.

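For orientation only, a minimal caller-side sketch of the flow after this change; it is not part of the patch, and the pipelineSelector variable, the exact getReplicationPipeline signature and the error handling are illustrative assumptions:

    // Ask the selector for a pipeline; it now delegates straight to
    // PipelineManager#getPipeline, which allocates from free nodes first and
    // otherwise falls back to an already open pipeline in round-robin order.
    Pipeline pipeline = pipelineSelector.getReplicationPipeline(
        ReplicationType.RATIS, ReplicationFactor.THREE);
    if (pipeline == null) {
      // Neither free nodes nor an operational pipeline were available.
      throw new IOException("Unable to obtain a RATIS/THREE pipeline");
    }
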
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
index 08710e7..2955af5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .SCMContainerPlacementRandom;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipelines.ratis.RatisManagerImpl;
 import org.apache.hadoop.hdds.scm.pipelines.standalone.StandaloneManagerImpl;
@@ -34,28 +33,17 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.statemachine
-    .InvalidStateTransitionException;
-import org.apache.hadoop.ozone.common.statemachine.StateMachine;
-import org.apache.hadoop.ozone.lease.Lease;
-import org.apache.hadoop.ozone.lease.LeaseException;
-import org.apache.hadoop.ozone.lease.LeaseManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.UUID;
-import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .FAILED_TO_CHANGE_PIPELINE_STATE;
-
 /**
  * Sends the request to the right pipeline manager.
  */
@@ -69,10 +57,6 @@ public class PipelineSelector {
   private final StandaloneManagerImpl standaloneManager;
   private final long containerSize;
   private final Node2PipelineMap node2PipelineMap;
-  private final LeaseManager<Pipeline> pipelineLeaseManager;
-  private final StateMachine<LifeCycleState,
-      HddsProtos.LifeCycleEvent> stateMachine;
-
   /**
    * Constructs a pipeline Selector.
    *
@@ -93,74 +77,6 @@ public class PipelineSelector {
     this.ratisManager =
         new RatisManagerImpl(this.nodeManager, placementPolicy, containerSize,
             conf, node2PipelineMap);
-    // Initialize the container state machine.
-    Set<HddsProtos.LifeCycleState> finalStates = new HashSet();
-    long pipelineCreationLeaseTimeout = conf.getTimeDuration(
-        ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT,
-        ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT,
-        TimeUnit.MILLISECONDS);
-    LOG.trace("Starting Pipeline Lease Manager.");
-    pipelineLeaseManager = new LeaseManager<>(pipelineCreationLeaseTimeout);
-    pipelineLeaseManager.start();
-
-    // These are the steady states of a container.
-    finalStates.add(HddsProtos.LifeCycleState.OPEN);
-    finalStates.add(HddsProtos.LifeCycleState.CLOSED);
-
-    this.stateMachine = new StateMachine<>(HddsProtos.LifeCycleState.ALLOCATED,
-        finalStates);
-    initializeStateMachine();
-  }
-
-  /**
-   * Event and State Transition Mapping:
-   *
-   * State: ALLOCATED ---------------> CREATING
-   * Event:                CREATE
-   *
-   * State: CREATING  ---------------> OPEN
-   * Event:               CREATED
-   *
-   * State: OPEN      ---------------> CLOSING
-   * Event:               FINALIZE
-   *
-   * State: CLOSING   ---------------> CLOSED
-   * Event:                CLOSE
-   *
-   * State: CREATING  ---------------> CLOSED
-   * Event:               TIMEOUT
-   *
-   *
-   * Container State Flow:
-   *
-   * [ALLOCATED]---->[CREATING]------>[OPEN]-------->[CLOSING]
-   *            (CREATE)     | (CREATED)     (FINALIZE)   |
-   *                         |                            |
-   *                         |                            |
-   *                         |(TIMEOUT)                   |(CLOSE)
-   *                         |                            |
-   *                         +--------> [CLOSED] <--------+
-   */
-  private void initializeStateMachine() {
-    stateMachine.addTransition(HddsProtos.LifeCycleState.ALLOCATED,
-        HddsProtos.LifeCycleState.CREATING,
-        HddsProtos.LifeCycleEvent.CREATE);
-
-    stateMachine.addTransition(HddsProtos.LifeCycleState.CREATING,
-        HddsProtos.LifeCycleState.OPEN,
-        HddsProtos.LifeCycleEvent.CREATED);
-
-    stateMachine.addTransition(HddsProtos.LifeCycleState.OPEN,
-        HddsProtos.LifeCycleState.CLOSING,
-        HddsProtos.LifeCycleEvent.FINALIZE);
-
-    stateMachine.addTransition(HddsProtos.LifeCycleState.CLOSING,
-        HddsProtos.LifeCycleState.CLOSED,
-        HddsProtos.LifeCycleEvent.CLOSE);
-
-    stateMachine.addTransition(HddsProtos.LifeCycleState.CREATING,
-        HddsProtos.LifeCycleState.CLOSED,
-        HddsProtos.LifeCycleEvent.TIMEOUT);
   }
 
   /**
@@ -172,14 +88,15 @@ public class PipelineSelector {
    * @return pipeline corresponding to nodes
    */
   public static Pipeline newPipelineFromNodes(
-      List<DatanodeDetails> nodes, ReplicationType replicationType,
-      ReplicationFactor replicationFactor, String name) {
+      List<DatanodeDetails> nodes, LifeCycleState state,
+      ReplicationType replicationType, ReplicationFactor replicationFactor,
+      String name) {
     Preconditions.checkNotNull(nodes);
     Preconditions.checkArgument(nodes.size() > 0);
     String leaderId = nodes.get(0).getUuidString();
-    // A new pipeline always starts in allocated state
-    Pipeline pipeline = new Pipeline(leaderId, LifeCycleState.ALLOCATED,
-        replicationType, replicationFactor, name);
+    Pipeline pipeline = new Pipeline(leaderId, state, replicationType,
+        replicationFactor, name);
     for (DatanodeDetails node : nodes) {
       pipeline.addMember(node);
     }
@@ -258,35 +175,8 @@ public class PipelineSelector {
     LOG.debug("Getting replication pipeline forReplicationType {} :" +
             " ReplicationFactor {}", replicationType.toString(),
         replicationFactor.toString());
-
-    /**
-     * In the Ozone world, we have a very simple policy.
-     *
-     * 1. Try to create a pipeline if there are enough free nodes.
-     *
-     * 2. This allows all nodes to part of a pipeline quickly.
-     *
-     * 3. if there are not enough free nodes, return already allocated pipeline
-     * in a round-robin fashion.
-     *
-     * TODO: Might have to come up with a better algorithm than this.
-     * Create a new placement policy that returns pipelines in round robin
-     * fashion.
-     */
-    Pipeline pipeline =
-        manager.createPipeline(replicationFactor, replicationType);
-    if (pipeline == null) {
-      // try to return a pipeline from already allocated pipelines
-      pipeline = manager.getPipeline(replicationFactor, replicationType);
-    } else {
-      // if a new pipeline is created, initialize its state machine
-      updatePipelineState(pipeline,HddsProtos.LifeCycleEvent.CREATE);
-
-      //TODO: move the initialization of pipeline to Ozone Client
-      manager.initializePipeline(pipeline);
-      updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.CREATED);
-    }
-    return pipeline;
+    return manager.getPipeline(replicationFactor, replicationType);
   }
 
   /**
@@ -304,6 +194,19 @@ public class PipelineSelector {
         " pipelineName:{}", replicationType, pipelineName);
     return manager.getPipeline(pipelineName);
   }
+  /**
+   * Creates a pipeline from a specified set of Nodes.
+   */
+
+  public void createPipeline(ReplicationType replicationType, String
+      pipelineID, List<DatanodeDetails> datanodes) throws IOException {
+    PipelineManager manager = getPipelineManager(replicationType);
+    Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
+    LOG.debug("Creating a pipeline: {} with nodes:{}", pipelineID,
+        datanodes.stream().map(DatanodeDetails::toString)
+            .collect(Collectors.joining(",")));
+    manager.createPipeline(pipelineID, datanodes);
+  }
 
   /**
    * Close the  pipeline with the given clusterId.
@@ -348,77 +251,12 @@ public class PipelineSelector {
   }
 
   public void removePipeline(UUID dnId) {
-    Set<Pipeline> pipelineSet =
+    Set<Pipeline> pipelineChannelSet =
         node2PipelineMap.getPipelines(dnId);
-    for (Pipeline pipeline : pipelineSet) {
-      getPipelineManager(pipeline.getType())
-          .removePipeline(pipeline);
+    for (Pipeline pipelineChannel : pipelineChannelSet) {
+      getPipelineManager(pipelineChannel.getType())
+          .removePipeline(pipelineChannel);
     }
     node2PipelineMap.removeDatanode(dnId);
   }
-
-  /**
-   * Update the Pipeline State to the next state.
-   *
-   * @param pipeline - Pipeline
-   * @param event - LifeCycle Event
-   * @throws SCMException  on Failure.
-   */
-  public void updatePipelineState(Pipeline pipeline,
-      HddsProtos.LifeCycleEvent event) throws IOException {
-    HddsProtos.LifeCycleState newState;
-    try {
-      newState = stateMachine.getNextState(pipeline.getLifeCycleState(), event);
-    } catch (InvalidStateTransitionException ex) {
-      String error = String.format("Failed to update pipeline state %s, " +
-              "reason: invalid state transition from state: %s upon " +
-              "event: %s.",
-          pipeline.getPipelineName(), pipeline.getLifeCycleState(), event);
-      LOG.error(error);
-      throw new SCMException(error, FAILED_TO_CHANGE_PIPELINE_STATE);
-    }
-
-    // This is a post condition after executing getNextState.
-    Preconditions.checkNotNull(newState);
-    Preconditions.checkNotNull(pipeline);
-    try {
-      switch (event) {
-      case CREATE:
-        // Acquire lease on pipeline
-        Lease<Pipeline> pipelineLease = pipelineLeaseManager.acquire(pipeline);
-        // Register callback to be executed in case of timeout
-        pipelineLease.registerCallBack(() -> {
-          updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.TIMEOUT);
-          return null;
-        });
-        break;
-      case CREATED:
-        // Release the lease on pipeline
-        pipelineLeaseManager.release(pipeline);
-        break;
-
-      case FINALIZE:
-        //TODO: cleanup pipeline by closing all the containers on the pipeline
-        break;
-
-      case CLOSE:
-      case TIMEOUT:
-        // TODO: Release the nodes here when pipelines are destroyed
-        break;
-      default:
-        throw new SCMException("Unsupported pipeline LifeCycleEvent.",
-            FAILED_TO_CHANGE_PIPELINE_STATE);
-      }
-
-      pipeline.setLifeCycleState(newState);
-    } catch (LeaseException e) {
-      throw new IOException("Lease Exception.", e);
-    }
-  }
-
-  public void shutdown() {
-    if (pipelineLeaseManager != null) {
-      pipelineLeaseManager.shutdown();
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
index c726ef6..a8f8b20 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.scm.pipelines.Node2PipelineMap;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
@@ -71,7 +72,7 @@ public class RatisManagerImpl extends PipelineManager {
    * Allocates a new ratis Pipeline from the free nodes.
    *
    * @param factor - One or Three
-   * @return Pipeline.
+   * @return PipelineChannel.
    */
   public Pipeline allocatePipeline(ReplicationFactor factor) {
     List<DatanodeDetails> newNodesList = new LinkedList<>();
@@ -88,23 +89,35 @@ public class RatisManagerImpl extends PipelineManager {
           // further allocations
           ratisMembers.addAll(newNodesList);
           LOG.info("Allocating a new ratis pipeline of size: {}", count);
-          // Start all pipeline names with "Ratis", easy to grep the logs.
+          // Start all channel names with "Ratis", easy to grep the logs.
           String pipelineName = PREFIX +
               UUID.randomUUID().toString().substring(PREFIX.length());
-          return PipelineSelector.newPipelineFromNodes(newNodesList,
-              ReplicationType.RATIS, factor, pipelineName);
+          Pipeline pipeline=
+              PipelineSelector.newPipelineFromNodes(newNodesList,
+              LifeCycleState.OPEN, ReplicationType.RATIS, factor, pipelineName);
+          try (XceiverClientRatis client =
+              XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) {
+            client.createPipeline(pipeline.getPipelineName(), newNodesList);
+          } catch (IOException e) {
+            return null;
+          }
+          return pipeline;
         }
       }
     }
     return null;
   }
 
-  public void initializePipeline(Pipeline pipeline) throws IOException {
-    //TODO:move the initialization from SCM to client
-    try (XceiverClientRatis client =
-        XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) {
-      client.createPipeline(pipeline.getPipelineName(), pipeline.getMachines());
-    }
+  /**
+   * Creates a pipeline from a specified set of Nodes.
+   *
+   * @param pipelineID - Name of the pipeline
+   * @param datanodes - The list of datanodes that make this pipeline.
+   */
+  @Override
+  public void createPipeline(String pipelineID,
+                             List<DatanodeDetails> datanodes) {
+
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
index bb4951f..cf691bf 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.scm.pipelines.Node2PipelineMap;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
@@ -85,19 +86,29 @@ public class StandaloneManagerImpl extends PipelineManager {
           // once a datanode has been added to a pipeline, exclude it from
           // further allocations
           standAloneMembers.addAll(newNodesList);
-          LOG.info("Allocating a new standalone pipeline of size: {}", count);
-          String pipelineName =
+          LOG.info("Allocating a new standalone pipeline channel of size: {}",
+              count);
+          String channelName =
               "SA-" + UUID.randomUUID().toString().substring(3);
           return PipelineSelector.newPipelineFromNodes(newNodesList,
-              ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineName);
+              LifeCycleState.OPEN, ReplicationType.STAND_ALONE,
+              ReplicationFactor.ONE, channelName);
         }
       }
     }
     return null;
   }
 
-  public void initializePipeline(Pipeline pipeline) {
-    // Nothing to be done for standalone pipeline
+  /**
+   * Creates a pipeline from a specified set of Nodes.
+   *
+   * @param pipelineID - Name of the pipeline
+   * @param datanodes - The list of datanodes that make this pipeline.
+   */
+  @Override
+  public void createPipeline(String pipelineID,
+                             List<DatanodeDetails> datanodes) {
+    //return newPipelineFromNodes(datanodes, pipelineID);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
index ffac6d5..bc3505f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
@@ -26,8 +26,6 @@ import org.apache.hadoop.hdds.scm.container.common.helpers
     .ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.junit.AfterClass;
@@ -53,7 +51,6 @@ public class TestNode2PipelineMap {
   private static ContainerWithPipeline ratisContainer;
   private static ContainerStateMap stateMap;
   private static ContainerMapping mapping;
-  private static PipelineSelector pipelineSelector;
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -69,7 +66,6 @@ public class TestNode2PipelineMap {
     mapping = (ContainerMapping)scm.getScmContainerManager();
     stateMap = mapping.getStateManager().getContainerStateMap();
     ratisContainer = mapping.allocateContainer(RATIS, THREE, "testOwner");
-    pipelineSelector = mapping.getPipelineSelector();
   }
 
   /**
@@ -117,15 +113,5 @@ public class TestNode2PipelineMap {
     NavigableSet<ContainerID> set2 = stateMap.getOpenContainerIDsByPipeline(
         ratisContainer.getPipeline().getPipelineName());
     Assert.assertEquals(0, set2.size());
-
-    try {
-      pipelineSelector.updatePipelineState(ratisContainer.getPipeline(),
-          HddsProtos.LifeCycleEvent.CLOSE);
-      Assert.fail("closing of pipeline without finalize should fail");
-    } catch (Exception e) {
-      Assert.assertTrue(e instanceof SCMException);
-      Assert.assertEquals(((SCMException)e).getResult(),
-          SCMException.ResultCodes.FAILED_TO_CHANGE_PIPELINE_STATE);
-    }
   }
 }




[43/50] hadoop git commit: HDDS-282. Consolidate logging in scm/container-service. Contributed by Elek Marton.

Posted by in...@apache.org.
HDDS-282. Consolidate logging in scm/container-service. Contributed by Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd0b9f13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd0b9f13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd0b9f13

Branch: refs/heads/HADOOP-15461
Commit: cd0b9f13805affcc91a2cba42b176bb9031378eb
Parents: 35ce6eb
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue Jul 24 10:16:53 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue Jul 24 10:17:03 2018 -0700

----------------------------------------------------------------------
 .../container/common/statemachine/EndpointStateMachine.java  | 4 ++--
 .../common/states/endpoint/RegisterEndpointTask.java         | 3 +--
 .../statemachine/background/BlockDeletingService.java        | 8 +++++---
 .../org/apache/hadoop/hdds/server/events/EventQueue.java     | 3 +--
 .../org/apache/hadoop/hdds/server/events/TypedEvent.java     | 6 ++++--
 5 files changed, 13 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd0b9f13/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java
index 7e85923..fb32a05 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java
@@ -203,11 +203,11 @@ public class EndpointStateMachine
     this.incMissed();
     if (this.getMissedCount() % getLogWarnInterval(conf) ==
         0) {
-      LOG.warn("Unable to communicate to SCM server at {}. We have not been " +
+      LOG.error("Unable to communicate to SCM server at {}. We have not been " +
               "able to communicate to this SCM server for past {} seconds.",
           this.getAddress().getHostString() + ":" + this.getAddress().getPort(),
           this.getMissedCount() * getScmHeartbeatInterval(
-              this.conf));
+              this.conf), ex);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd0b9f13/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index b3d2b62..25af4a1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -125,8 +125,7 @@ public final class RegisterEndpointTask implements
       rpcEndPoint.setState(nextState);
       rpcEndPoint.zeroMissedCount();
     } catch (IOException ex) {
-      rpcEndPoint.logIfNeeded(ex
-      );
+      rpcEndPoint.logIfNeeded(ex);
     } finally {
       rpcEndPoint.unlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd0b9f13/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index 4a572ca..51eed7f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -119,9 +119,11 @@ public class BlockDeletingService extends BackgroundService{
       // configured.
       containers = containerSet.chooseContainerForBlockDeletion(
           containerLimitPerInterval, containerDeletionPolicy);
-      LOG.info("Plan to choose {} containers for block deletion, "
-          + "actually returns {} valid containers.",
-          containerLimitPerInterval, containers.size());
+      if (containers.size() > 0) {
+        LOG.info("Plan to choose {} containers for block deletion, "
+                + "actually returns {} valid containers.",
+            containerLimitPerInterval, containers.size());
+      }
 
       for(ContainerData container : containers) {
         BlockDeletingTask containerTask =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd0b9f13/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
index 7e29223..f93c54b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -155,8 +155,7 @@ public class EventQueue implements EventPublisher, AutoCloseable {
       }
 
     } else {
-      throw new IllegalArgumentException(
-          "No event handler registered for event " + event);
+      LOG.warn("No event handler registered for event " + event);
     }
 
   }

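To illustrate the behavioral change in EventQueue, a small sketch of firing an event that has no registered handler; this assumes the fireEvent/TypedEvent signatures implied by the hunks above and is not part of the patch:

    EventQueue queue = new EventQueue();
    TypedEvent<String> orphanEvent =
        new TypedEvent<>(String.class, "OrphanEvent");
    // No handler has been registered for orphanEvent. Previously this call
    // threw IllegalArgumentException; after this change it only logs a
    // warning and the event is dropped.
    queue.fireEvent(orphanEvent, "some payload");
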
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd0b9f13/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
index 62e2419..27bba3a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
@@ -50,7 +50,9 @@ public class TypedEvent<T> implements Event<T> {
 
   @Override
   public String toString() {
-    return "TypedEvent{" + "payloadType=" + payloadType + ", name='" + name
-        + '\'' + '}';
+    return "TypedEvent{" +
+        "payloadType=" + payloadType.getSimpleName() +
+        ", name='" + name + '\'' +
+        '}';
   }
 }




[26/50] hadoop git commit: HDDS-260. Support in Datanode for sending ContainerActions to SCM. Contributed by Nanda kumar.

Posted by in...@apache.org.
HDDS-260. Support in Datanode for sending ContainerActions to SCM. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/347c9550
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/347c9550
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/347c9550

Branch: refs/heads/HADOOP-15461
Commit: 347c9550135ea10fd84d5007124452bf5f2d6619
Parents: 9be25e3
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri Jul 20 14:37:13 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Fri Jul 20 14:37:13 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |   6 +
 .../common/src/main/resources/ozone-default.xml |  10 +
 .../common/statemachine/StateContext.java       |  55 +++-
 .../states/endpoint/HeartbeatEndpointTask.java  |  33 +-
 .../StorageContainerDatanodeProtocol.proto      |   4 +-
 .../common/report/TestReportPublisher.java      |  41 ---
 .../endpoint/TestHeartbeatEndpointTask.java     | 300 +++++++++++++++++++
 .../common/states/endpoint/package-info.java    |  18 ++
 8 files changed, 414 insertions(+), 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/347c9550/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 0283615..fd4bf08 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -48,4 +48,10 @@ public final class HddsConfigKeys {
       "hdds.command.status.report.interval";
   public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT =
       "60s";
+
+  public static final String HDDS_CONTAINER_ACTION_MAX_LIMIT =
+      "hdds.container.action.max.limit";
+  public static final int HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT =
+      20;
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/347c9550/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 69a382a..84a3e0c 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1098,4 +1098,14 @@
     </description>
   </property>
 
+  <property>
+    <name>hdds.container.action.max.limit</name>
+    <value>20</value>
+    <tag>DATANODE</tag>
+    <description>
+      Maximum number of Container Actions sent by the datanode to SCM in a
+      single heartbeat.
+    </description>
+  </property>
+
 </configuration>
\ No newline at end of file

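On the datanode side the new key is read through the HddsConfigKeys constants added above, roughly as in the sketch below; this is illustrative only, and the same pattern appears in the HeartbeatEndpointTask change later in this patch:

    OzoneConfiguration conf = new OzoneConfiguration();
    // Falls back to the default of 20 when hdds.container.action.max.limit
    // is not configured.
    int maxContainerActionsPerHB = conf.getInt(
        HddsConfigKeys.HDDS_CONTAINER_ACTION_MAX_LIMIT,
        HddsConfigKeys.HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT);
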
http://git-wip-us.apache.org/repos/asf/hadoop/blob/347c9550/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index faaff69..7862cc6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -20,14 +20,18 @@ import com.google.protobuf.GeneratedMessage;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
 import org.apache.hadoop.ozone.container.common.states.DatanodeState;
 import org.apache.hadoop.ozone.container.common.states.datanode
     .InitDatanodeState;
 import org.apache.hadoop.ozone.container.common.states.datanode
     .RunningDatanodeState;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
-import org.apache.hadoop.ozone.protocol.commands.CommandStatus.CommandStatusBuilder;
+import org.apache.hadoop.ozone.protocol.commands.CommandStatus
+    .CommandStatusBuilder;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -43,6 +47,7 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.stream.Collectors;
 
 import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT;
 
@@ -59,6 +64,7 @@ public class StateContext {
   private final AtomicLong stateExecutionCount;
   private final Configuration conf;
   private final Queue<GeneratedMessage> reports;
+  private final Queue<ContainerAction> containerActions;
   private DatanodeStateMachine.DatanodeStates state;
 
   /**
@@ -76,6 +82,7 @@ public class StateContext {
     commandQueue = new LinkedList<>();
     cmdStatusMap = new ConcurrentHashMap<>();
     reports = new LinkedList<>();
+    containerActions = new LinkedList<>();
     lock = new ReentrantLock();
     stateExecutionCount = new AtomicLong(0);
   }
@@ -187,15 +194,45 @@ public class StateContext {
    * @return List<reports>
    */
   public List<GeneratedMessage> getReports(int maxLimit) {
-    List<GeneratedMessage> results = new ArrayList<>();
     synchronized (reports) {
-      GeneratedMessage report = reports.poll();
-      while(results.size() < maxLimit && report != null) {
-        results.add(report);
-        report = reports.poll();
-      }
+      return reports.parallelStream().limit(maxLimit)
+          .collect(Collectors.toList());
+    }
+  }
+
+
+  /**
+   * Adds the ContainerAction to ContainerAction queue.
+   *
+   * @param containerAction ContainerAction to be added
+   */
+  public void addContainerAction(ContainerAction containerAction) {
+    synchronized (containerActions) {
+      containerActions.add(containerAction);
+    }
+  }
+
+  /**
+   * Returns all the pending ContainerActions from the ContainerAction queue,
+   * or empty list if the queue is empty.
+   *
+   * @return List<ContainerAction>
+   */
+  public List<ContainerAction> getAllPendingContainerActions() {
+    return getPendingContainerAction(Integer.MAX_VALUE);
+  }
+
+  /**
+   * Returns pending ContainerActions from the ContainerAction queue with a
+   * max limit on list size, or empty list if the queue is empty.
+   *
+   * @return List<ContainerAction>
+   */
+  public List<ContainerAction> getPendingContainerAction(int maxLimit) {
+    synchronized (containerActions) {
+      return containerActions.parallelStream().limit(maxLimit)
+          .collect(Collectors.toList());
     }
-    return results;
   }
 
   /**

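Taken together, the new queue methods are used roughly as follows. This is an illustrative sketch that mirrors the unit test added further down; the stateContext and maxContainerActionsPerHB names are assumptions here, not code from the patch:

    // A datanode component asks SCM to close a full container by queueing
    // a ContainerAction on the shared StateContext.
    ContainerAction action = ContainerAction.newBuilder()
        .setContainer(ContainerInfo.newBuilder().setContainerID(1L).build())
        .setAction(ContainerAction.Action.CLOSE)
        .setReason(ContainerAction.Reason.CONTAINER_FULL)
        .build();
    stateContext.addContainerAction(action);

    // The heartbeat task later picks up at most
    // hdds.container.action.max.limit of the queued actions and bundles
    // them into the next heartbeat request.
    List<ContainerAction> pending =
        stateContext.getPendingContainerAction(maxContainerActionsPerHB);
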
http://git-wip-us.apache.org/repos/asf/hadoop/blob/347c9550/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index 260a245..020fb71 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -25,6 +25,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerActionsProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
@@ -46,8 +50,14 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.time.ZonedDateTime;
+import java.util.List;
 import java.util.concurrent.Callable;
 
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_CONTAINER_ACTION_MAX_LIMIT;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT;
+
 /**
  * Heartbeat class for SCMs.
  */
@@ -59,6 +69,7 @@ public class HeartbeatEndpointTask
   private final Configuration conf;
   private DatanodeDetailsProto datanodeDetailsProto;
   private StateContext context;
+  private int maxContainerActionsPerHB;
 
   /**
    * Constructs a SCM heart beat.
@@ -70,6 +81,8 @@ public class HeartbeatEndpointTask
     this.rpcEndpoint = rpcEndpoint;
     this.conf = conf;
     this.context = context;
+    this.maxContainerActionsPerHB = conf.getInt(HDDS_CONTAINER_ACTION_MAX_LIMIT,
+        HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT);
   }
 
   /**
@@ -107,7 +120,7 @@ public class HeartbeatEndpointTask
           SCMHeartbeatRequestProto.newBuilder()
               .setDatanodeDetails(datanodeDetailsProto);
       addReports(requestBuilder);
-
+      addContainerActions(requestBuilder);
       SCMHeartbeatResponseProto reponse = rpcEndpoint.getEndPoint()
           .sendHeartbeat(requestBuilder.build());
       processResponse(reponse, datanodeDetailsProto);
@@ -140,6 +153,24 @@ public class HeartbeatEndpointTask
   }
 
   /**
+   * Adds all the pending ContainerActions to the heartbeat.
+   *
+   * @param requestBuilder builder to which the container actions are added.
+   */
+  private void addContainerActions(
+      SCMHeartbeatRequestProto.Builder requestBuilder) {
+    List<ContainerAction> actions = context.getPendingContainerAction(
+        maxContainerActionsPerHB);
+    if (!actions.isEmpty()) {
+      ContainerActionsProto cap = ContainerActionsProto.newBuilder()
+          .addAllContainerActions(actions)
+          .build();
+      requestBuilder.setContainerActions(cap);
+    }
+  }
+
+
+  /**
    * Returns a builder class for HeartbeatEndpointTask task.
    * @return   Builder.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/347c9550/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index 4238389..d89567b 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -79,8 +79,8 @@ message SCMHeartbeatRequestProto {
   required DatanodeDetailsProto datanodeDetails = 1;
   optional NodeReportProto nodeReport = 2;
   optional ContainerReportsProto containerReport = 3;
-  optional ContainerActionsProto containerActions = 4;
-  optional CommandStatusReportsProto commandStatusReport = 5;
+  optional CommandStatusReportsProto commandStatusReport = 4;
+  optional ContainerActionsProto containerActions = 5;
 }
 
 /*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/347c9550/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
index a0db2e8..811599f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.container.common.report;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.Descriptors;
 import com.google.protobuf.GeneratedMessage;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -28,14 +27,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
@@ -178,22 +171,6 @@ public class TestReportPublisher {
     executorService.shutdown();
   }
 
-  @Test
-  public void testAddingReportToHeartbeat() {
-    GeneratedMessage nodeReport = NodeReportProto.getDefaultInstance();
-    GeneratedMessage containerReport = ContainerReportsProto
-        .getDefaultInstance();
-    SCMHeartbeatRequestProto.Builder heartbeatBuilder =
-        SCMHeartbeatRequestProto.newBuilder();
-    heartbeatBuilder.setDatanodeDetails(
-        getDatanodeDetails().getProtoBufMessage());
-    addReport(heartbeatBuilder, nodeReport);
-    addReport(heartbeatBuilder, containerReport);
-    SCMHeartbeatRequestProto heartbeat = heartbeatBuilder.build();
-    Assert.assertTrue(heartbeat.hasNodeReport());
-    Assert.assertTrue(heartbeat.hasContainerReport());
-  }
-
   /**
    * Get a datanode details.
    *
@@ -222,22 +199,4 @@ public class TestReportPublisher {
     return builder.build();
   }
 
-  /**
-   * Adds the report to heartbeat.
-   *
-   * @param requestBuilder builder to which the report has to be added.
-   * @param report         the report to be added.
-   */
-  private static void addReport(SCMHeartbeatRequestProto.Builder
-      requestBuilder, GeneratedMessage report) {
-    String reportName = report.getDescriptorForType().getFullName();
-    for (Descriptors.FieldDescriptor descriptor :
-        SCMHeartbeatRequestProto.getDescriptor().getFields()) {
-      String heartbeatFieldName = descriptor.getMessageType().getFullName();
-      if (heartbeatFieldName.equals(reportName)) {
-        requestBuilder.setField(descriptor, report);
-      }
-    }
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/347c9550/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
new file mode 100644
index 0000000..b4d718d
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
@@ -0,0 +1,300 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.states.endpoint;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
+import org.apache.hadoop.ozone.container.common.statemachine
+    .DatanodeStateMachine;
+import org.apache.hadoop.ozone.container.common.statemachine
+    .DatanodeStateMachine.DatanodeStates;
+import org.apache.hadoop.ozone.container.common.statemachine
+    .EndpointStateMachine;
+import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.protocolPB
+    .StorageContainerDatanodeProtocolClientSideTranslatorPB;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
+
+import java.util.UUID;
+
+/**
+ * This class tests the functionality of HeartbeatEndpointTask.
+ */
+public class TestHeartbeatEndpointTask {
+
+
+  @Test
+  public void testheartbeatWithoutReports() throws Exception {
+    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
+        Mockito.mock(
+            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
+    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
+        .forClass(SCMHeartbeatRequestProto.class);
+    Mockito.when(scm.sendHeartbeat(argument.capture()))
+        .thenAnswer(invocation ->
+            SCMHeartbeatResponseProto.newBuilder()
+                .setDatanodeUUID(
+                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
+                        .getDatanodeDetails().getUuid())
+                .build());
+
+    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(scm);
+    endpointTask.call();
+    SCMHeartbeatRequestProto heartbeat = argument.getValue();
+    Assert.assertTrue(heartbeat.hasDatanodeDetails());
+    Assert.assertFalse(heartbeat.hasNodeReport());
+    Assert.assertFalse(heartbeat.hasContainerReport());
+    Assert.assertFalse(heartbeat.hasCommandStatusReport());
+    Assert.assertFalse(heartbeat.hasContainerActions());
+  }
+
+  @Test
+  public void testheartbeatWithNodeReports() throws Exception {
+    Configuration conf = new OzoneConfiguration();
+    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
+        Mockito.mock(DatanodeStateMachine.class));
+
+    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
+        Mockito.mock(
+            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
+    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
+        .forClass(SCMHeartbeatRequestProto.class);
+    Mockito.when(scm.sendHeartbeat(argument.capture()))
+        .thenAnswer(invocation ->
+            SCMHeartbeatResponseProto.newBuilder()
+                .setDatanodeUUID(
+                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
+                        .getDatanodeDetails().getUuid())
+                .build());
+
+    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
+        conf, context, scm);
+    context.addReport(NodeReportProto.getDefaultInstance());
+    endpointTask.call();
+    SCMHeartbeatRequestProto heartbeat = argument.getValue();
+    Assert.assertTrue(heartbeat.hasDatanodeDetails());
+    Assert.assertTrue(heartbeat.hasNodeReport());
+    Assert.assertFalse(heartbeat.hasContainerReport());
+    Assert.assertFalse(heartbeat.hasCommandStatusReport());
+    Assert.assertFalse(heartbeat.hasContainerActions());
+  }
+
+  @Test
+  public void testheartbeatWithContainerReports() throws Exception {
+    Configuration conf = new OzoneConfiguration();
+    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
+        Mockito.mock(DatanodeStateMachine.class));
+
+    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
+        Mockito.mock(
+            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
+    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
+        .forClass(SCMHeartbeatRequestProto.class);
+    Mockito.when(scm.sendHeartbeat(argument.capture()))
+        .thenAnswer(invocation ->
+            SCMHeartbeatResponseProto.newBuilder()
+                .setDatanodeUUID(
+                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
+                        .getDatanodeDetails().getUuid())
+                .build());
+
+    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
+        conf, context, scm);
+    context.addReport(ContainerReportsProto.getDefaultInstance());
+    endpointTask.call();
+    SCMHeartbeatRequestProto heartbeat = argument.getValue();
+    Assert.assertTrue(heartbeat.hasDatanodeDetails());
+    Assert.assertFalse(heartbeat.hasNodeReport());
+    Assert.assertTrue(heartbeat.hasContainerReport());
+    Assert.assertFalse(heartbeat.hasCommandStatusReport());
+    Assert.assertFalse(heartbeat.hasContainerActions());
+  }
+
+  @Test
+  public void testheartbeatWithCommandStatusReports() throws Exception {
+    Configuration conf = new OzoneConfiguration();
+    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
+        Mockito.mock(DatanodeStateMachine.class));
+
+    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
+        Mockito.mock(
+            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
+    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
+        .forClass(SCMHeartbeatRequestProto.class);
+    Mockito.when(scm.sendHeartbeat(argument.capture()))
+        .thenAnswer(invocation ->
+            SCMHeartbeatResponseProto.newBuilder()
+                .setDatanodeUUID(
+                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
+                        .getDatanodeDetails().getUuid())
+                .build());
+
+    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
+        conf, context, scm);
+    context.addReport(CommandStatusReportsProto.getDefaultInstance());
+    endpointTask.call();
+    SCMHeartbeatRequestProto heartbeat = argument.getValue();
+    Assert.assertTrue(heartbeat.hasDatanodeDetails());
+    Assert.assertFalse(heartbeat.hasNodeReport());
+    Assert.assertFalse(heartbeat.hasContainerReport());
+    Assert.assertTrue(heartbeat.hasCommandStatusReport());
+    Assert.assertFalse(heartbeat.hasContainerActions());
+  }
+
+  @Test
+  public void testheartbeatWithContainerActions() throws Exception {
+    Configuration conf = new OzoneConfiguration();
+    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
+        Mockito.mock(DatanodeStateMachine.class));
+
+    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
+        Mockito.mock(
+            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
+    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
+        .forClass(SCMHeartbeatRequestProto.class);
+    Mockito.when(scm.sendHeartbeat(argument.capture()))
+        .thenAnswer(invocation ->
+            SCMHeartbeatResponseProto.newBuilder()
+                .setDatanodeUUID(
+                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
+                        .getDatanodeDetails().getUuid())
+                .build());
+
+    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
+        conf, context, scm);
+    context.addContainerAction(getContainerAction());
+    endpointTask.call();
+    SCMHeartbeatRequestProto heartbeat = argument.getValue();
+    Assert.assertTrue(heartbeat.hasDatanodeDetails());
+    Assert.assertFalse(heartbeat.hasNodeReport());
+    Assert.assertFalse(heartbeat.hasContainerReport());
+    Assert.assertFalse(heartbeat.hasCommandStatusReport());
+    Assert.assertTrue(heartbeat.hasContainerActions());
+  }
+
+  @Test
+  public void testheartbeatWithAllReports() throws Exception {
+    Configuration conf = new OzoneConfiguration();
+    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
+        Mockito.mock(DatanodeStateMachine.class));
+
+    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
+        Mockito.mock(
+            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
+    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
+        .forClass(SCMHeartbeatRequestProto.class);
+    Mockito.when(scm.sendHeartbeat(argument.capture()))
+        .thenAnswer(invocation ->
+            SCMHeartbeatResponseProto.newBuilder()
+                .setDatanodeUUID(
+                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
+                        .getDatanodeDetails().getUuid())
+                .build());
+
+    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
+        conf, context, scm);
+    context.addReport(NodeReportProto.getDefaultInstance());
+    context.addReport(ContainerReportsProto.getDefaultInstance());
+    context.addReport(CommandStatusReportsProto.getDefaultInstance());
+    context.addContainerAction(getContainerAction());
+    endpointTask.call();
+    SCMHeartbeatRequestProto heartbeat = argument.getValue();
+    Assert.assertTrue(heartbeat.hasDatanodeDetails());
+    Assert.assertTrue(heartbeat.hasNodeReport());
+    Assert.assertTrue(heartbeat.hasContainerReport());
+    Assert.assertTrue(heartbeat.hasCommandStatusReport());
+    Assert.assertTrue(heartbeat.hasContainerActions());
+  }
+
+  /**
+   * Creates HeartbeatEndpointTask for the given StorageContainerManager proxy.
+   *
+   * @param proxy StorageContainerDatanodeProtocolClientSideTranslatorPB
+   *
+   * @return HeartbeatEndpointTask
+   */
+  private HeartbeatEndpointTask getHeartbeatEndpointTask(
+      StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) {
+    Configuration conf = new OzoneConfiguration();
+    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
+        Mockito.mock(DatanodeStateMachine.class));
+    return getHeartbeatEndpointTask(conf, context, proxy);
+
+  }
+
+  /**
+   * Creates HeartbeatEndpointTask with the given conf, context and
+   * StorageContainerManager client side proxy.
+   *
+   * @param conf Configuration
+   * @param context StateContext
+   * @param proxy StorageContainerDatanodeProtocolClientSideTranslatorPB
+   *
+   * @return HeartbeatEndpointTask
+   */
+  private HeartbeatEndpointTask getHeartbeatEndpointTask(
+      Configuration conf,
+      StateContext context,
+      StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) {
+    DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder()
+        .setUuid(UUID.randomUUID().toString())
+        .setHostName("localhost")
+        .setIpAddress("127.0.0.1")
+        .build();
+    EndpointStateMachine endpointStateMachine = Mockito
+        .mock(EndpointStateMachine.class);
+    Mockito.when(endpointStateMachine.getEndPoint()).thenReturn(proxy);
+    return HeartbeatEndpointTask.newBuilder()
+        .setConfig(conf)
+        .setDatanodeDetails(datanodeDetails)
+        .setContext(context)
+        .setEndpointStateMachine(endpointStateMachine)
+        .build();
+  }
+
+  private ContainerAction getContainerAction() {
+    ContainerAction.Builder builder = ContainerAction.newBuilder();
+    ContainerInfo containerInfo = ContainerInfo.newBuilder()
+        .setContainerID(1L)
+        .build();
+    builder.setContainer(containerInfo)
+        .setAction(ContainerAction.Action.CLOSE)
+        .setReason(ContainerAction.Reason.CONTAINER_FULL);
+    return builder.build();
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/347c9550/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
new file mode 100644
index 0000000..d120a5c
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common.states.endpoint;




[19/50] hadoop git commit: HDDS-256. Adding CommandStatusReport Handler. Contributed by Ajay Kumar.

Posted by in...@apache.org.
HDDS-256. Adding CommandStatusReport Handler. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89a0f807
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89a0f807
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89a0f807

Branch: refs/heads/HADOOP-15461
Commit: 89a0f80741beb5a998f143849e797d780332048b
Parents: 8a6bb84
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri Jul 20 11:03:33 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Fri Jul 20 11:07:09 2018 -0700

----------------------------------------------------------------------
 .../scm/command/CommandStatusReportHandler.java | 129 +++++++++++++++++
 .../hadoop/hdds/scm/command/package-info.java   |  26 ++++
 .../hadoop/hdds/scm/events/SCMEvents.java       |  24 ++++
 .../scm/server/StorageContainerManager.java     |   4 +
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  18 +++
 .../command/TestCommandStatusReportHandler.java | 137 +++++++++++++++++++
 .../hadoop/hdds/scm/command/package-info.java   |  22 +++
 7 files changed, 360 insertions(+)
----------------------------------------------------------------------
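In brief, the patch below registers a new CommandStatusReportHandler on SCM's event queue and has it re-publish every CommandStatus received from a datanode as a typed event. A minimal wiring sketch follows; it uses only names that appear in this patch, but the wrapper class and main method are invented here for illustration and this is not how StorageContainerManager itself is constructed:

    import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
    import org.apache.hadoop.hdds.scm.events.SCMEvents;
    import org.apache.hadoop.hdds.server.events.EventQueue;

    /** Illustration only: how the new handler plugs into an event queue. */
    public final class CommandStatusWiringSketch {
      public static void main(String[] args) {
        EventQueue eventQueue = new EventQueue();
        // Register the handler for command status reports coming from datanodes.
        eventQueue.addHandler(SCMEvents.CMD_STATUS_REPORT,
            new CommandStatusReportHandler());
        // When the heartbeat dispatcher fires SCMEvents.CMD_STATUS_REPORT with a
        // CommandStatusReportFromDatanode payload, onMessage() re-publishes one
        // typed event per CommandStatus: REPLICATION_STATUS, CLOSE_CONTAINER_STATUS
        // or DELETE_BLOCK_STATUS; watchers subscribe to those with addHandler too.
      }
    }

Statuses of any other command type are only logged at debug level, per the default branch of onMessage().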


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a0f807/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java
new file mode 100644
index 0000000..9413a46
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.command;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.CommandStatus;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
+    .CommandStatusReportFromDatanode;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+
+/**
+ * Handles CommandStatusReports from datanode.
+ */
+public class CommandStatusReportHandler implements
+    EventHandler<CommandStatusReportFromDatanode> {
+
+  private static final Logger LOGGER = LoggerFactory
+      .getLogger(CommandStatusReportHandler.class);
+
+  @Override
+  public void onMessage(CommandStatusReportFromDatanode report,
+      EventPublisher publisher) {
+    Preconditions.checkNotNull(report);
+    List<CommandStatus> cmdStatusList = report.getReport().getCmdStatusList();
+    Preconditions.checkNotNull(cmdStatusList);
+    LOGGER.trace("Processing command status report for dn: {}", report
+        .getDatanodeDetails());
+
+    // Route command status to its watchers.
+    cmdStatusList.forEach(cmdStatus -> {
+      LOGGER.trace("Emitting command status for id:{} type: {}", cmdStatus
+          .getCmdId(), cmdStatus.getType());
+      switch (cmdStatus.getType()) {
+      case replicateContainerCommand:
+        publisher.fireEvent(SCMEvents.REPLICATION_STATUS, new
+            ReplicationStatus(cmdStatus));
+        break;
+      case closeContainerCommand:
+        publisher.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new
+            CloseContainerStatus(cmdStatus));
+        break;
+      case deleteBlocksCommand:
+        publisher.fireEvent(SCMEvents.DELETE_BLOCK_STATUS, new
+            DeleteBlockCommandStatus(cmdStatus));
+        break;
+      default:
+        LOGGER.debug("CommandStatus of type:{} not handled in " +
+            "CommandStatusReportHandler.", cmdStatus.getType());
+        break;
+      }
+    });
+  }
+
+  /**
+   * Wrapper event for CommandStatus.
+   */
+  public static class CommandStatusEvent implements IdentifiableEventPayload {
+    private CommandStatus cmdStatus;
+
+    CommandStatusEvent(CommandStatus cmdStatus) {
+      this.cmdStatus = cmdStatus;
+    }
+
+    public CommandStatus getCmdStatus() {
+      return cmdStatus;
+    }
+
+    @Override
+    public String toString() {
+      return "CommandStatusEvent:" + cmdStatus.toString();
+    }
+
+    @Override
+    public long getId() {
+      return cmdStatus.getCmdId();
+    }
+  }
+
+  /**
+   * Wrapper event for Replicate Command.
+   */
+  public static class ReplicationStatus extends CommandStatusEvent {
+    ReplicationStatus(CommandStatus cmdStatus) {
+      super(cmdStatus);
+    }
+  }
+
+  /**
+   * Wrapper event for CloseContainer Command.
+   */
+  public static class CloseContainerStatus extends CommandStatusEvent {
+    CloseContainerStatus(CommandStatus cmdStatus) {
+      super(cmdStatus);
+    }
+  }
+
+  /**
+   * Wrapper event for DeleteBlock Command.
+   */
+  public static class DeleteBlockCommandStatus extends CommandStatusEvent {
+    DeleteBlockCommandStatus(CommandStatus cmdStatus) {
+      super(cmdStatus);
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a0f807/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java
new file mode 100644
index 0000000..ba17fb9
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * <p>
+ * This package contains classes for handling commands and command status reports in SCM.
+ */
+
+/**
+ * This package contains classes that handle command status reports sent from datanodes to SCM.
+ */
+package org.apache.hadoop.hdds.scm.command;
+/*
+ * Classes related to commands issued from SCM to DataNode.
+ * */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a0f807/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
index 485b3f5..46f1588 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hdds.scm.events;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.*;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .CommandStatusReportFromDatanode;
@@ -105,6 +106,29 @@ public final class SCMEvents {
       new TypedEvent<>(DatanodeDetails.class, "Dead_Node");
 
   /**
+   * This event will be triggered by CommandStatusReportHandler whenever a
+   * status for Replication SCMCommand is received.
+   */
+  public static final Event<ReplicationStatus> REPLICATION_STATUS = new
+      TypedEvent<>(ReplicationStatus.class, "ReplicateCommandStatus");
+  /**
+   * This event will be triggered by CommandStatusReportHandler whenever a
+   * status for CloseContainer SCMCommand is received.
+   */
+  public static final Event<CloseContainerStatus>
+      CLOSE_CONTAINER_STATUS =
+      new TypedEvent<>(CloseContainerStatus.class,
+          "CloseContainerCommandStatus");
+  /**
+   * This event will be triggered by CommandStatusReportHandler whenever a
+   * status for DeleteBlock SCMCommand is received.
+   */
+  public static final Event<DeleteBlockCommandStatus>
+      DELETE_BLOCK_STATUS =
+      new TypedEvent(DeleteBlockCommandStatus.class,
+          "DeleteBlockCommandStatus");
+
+  /**
    * Private Ctor. Never Constructed.
    */
   private SCMEvents() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a0f807/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index f37a0ed..aba6410 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.scm.block.BlockManager;
 import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
+import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
 import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
 import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
@@ -191,6 +192,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
         new NodeReportHandler(scmNodeManager);
     ContainerReportHandler containerReportHandler =
         new ContainerReportHandler(scmContainerManager, node2ContainerMap);
+    CommandStatusReportHandler cmdStatusReportHandler =
+        new CommandStatusReportHandler();
     NewNodeHandler newNodeHandler = new NewNodeHandler(node2ContainerMap);
     StaleNodeHandler staleNodeHandler = new StaleNodeHandler(node2ContainerMap);
     DeadNodeHandler deadNodeHandler = new DeadNodeHandler(node2ContainerMap);
@@ -202,6 +205,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     eventQueue.addHandler(SCMEvents.NEW_NODE, newNodeHandler);
     eventQueue.addHandler(SCMEvents.STALE_NODE, staleNodeHandler);
     eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
+    eventQueue.addHandler(SCMEvents.CMD_STATUS_REPORT, cmdStatusReportHandler);
 
     scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys
         .OZONE_ADMINISTRATORS);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a0f807/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index 7568bf3..8d7a2c2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -17,8 +17,14 @@
 package org.apache.hadoop.hdds.scm;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol
     .proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol
+    .proto.StorageContainerDatanodeProtocolProtos.CommandStatus;
+import org.apache.hadoop.hdds.protocol
+    .proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -90,6 +96,18 @@ public final class TestUtils {
     return reportList;
   }
 
+  /**
+   * Create Command Status report object.
+   * @return CommandStatusReportsProto
+   */
+  public static CommandStatusReportsProto createCommandStatusReport(
+      List<CommandStatus> reports) {
+    CommandStatusReportsProto.Builder report = CommandStatusReportsProto
+        .newBuilder();
+    report.addAllCmdStatus(reports);
+    return report.build();
+  }
+
 
   /**
    * Get specified number of DatanodeDetails and registered them with node

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a0f807/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
new file mode 100644
index 0000000..5e64e57
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.command;
+
+import org.apache.hadoop.hdds.HddsIdFactory;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.CommandStatus;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
+    .CommandStatusReportFromDatanode;
+
+import org.apache.hadoop.hdds.server.events.Event;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.UUID;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+
+public class TestCommandStatusReportHandler implements EventPublisher {
+
+  private static Logger LOG = LoggerFactory
+      .getLogger(TestCommandStatusReportHandler.class);
+  private CommandStatusReportHandler cmdStatusReportHandler;
+  private String storagePath = GenericTestUtils.getRandomizedTempPath()
+      .concat("/" + UUID.randomUUID().toString());
+  ;
+
+  @Before
+  public void setup() {
+    cmdStatusReportHandler = new CommandStatusReportHandler();
+  }
+
+  @Test
+  public void testCommandStatusReport() {
+    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
+        .captureLogs(LOG);
+
+    CommandStatusReportFromDatanode report = this.getStatusReport(Collections
+        .emptyList());
+    cmdStatusReportHandler.onMessage(report, this);
+    assertFalse(logCapturer.getOutput().contains("DeleteBlockCommandStatus"));
+    assertFalse(logCapturer.getOutput().contains
+        ("CloseContainerCommandStatus"));
+    assertFalse(logCapturer.getOutput().contains
+        ("ReplicateCommandStatus"));
+
+
+    report = this.getStatusReport(this.getCommandStatusList());
+    cmdStatusReportHandler.onMessage(report, this);
+    assertTrue(logCapturer.getOutput().contains("firing event of type " +
+        "DeleteBlockCommandStatus"));
+    assertTrue(logCapturer.getOutput().contains("firing event of type " +
+        "CloseContainerCommandStatus"));
+    assertTrue(logCapturer.getOutput().contains("firing event of type " +
+        "ReplicateCommandStatus"));
+
+    assertTrue(logCapturer.getOutput().contains("type: " +
+        "closeContainerCommand"));
+    assertTrue(logCapturer.getOutput().contains("type: " +
+        "deleteBlocksCommand"));
+    assertTrue(logCapturer.getOutput().contains("type: " +
+        "replicateContainerCommand"));
+
+  }
+
+  private CommandStatusReportFromDatanode getStatusReport(List<CommandStatus>
+      reports) {
+    CommandStatusReportsProto report = TestUtils.createCommandStatusReport
+        (reports);
+    DatanodeDetails dn = TestUtils.getDatanodeDetails();
+    return new SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode
+        (dn, report);
+  }
+
+  @Override
+  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent
+      (EVENT_TYPE event, PAYLOAD payload) {
+    LOG.info("firing event of type {}, payload {}", event.getName(), payload
+        .toString());
+  }
+
+  private List<CommandStatus> getCommandStatusList() {
+    List<CommandStatus> reports = new ArrayList<>(3);
+
+    // Add status message for replication, close container and delete block
+    // command.
+    CommandStatus.Builder builder = CommandStatus.newBuilder();
+
+    builder.setCmdId(HddsIdFactory.getLongId())
+        .setStatus(CommandStatus.Status.EXECUTED)
+        .setType(Type.deleteBlocksCommand);
+    reports.add(builder.build());
+
+    builder.setCmdId(HddsIdFactory.getLongId())
+        .setStatus(CommandStatus.Status.EXECUTED)
+        .setType(Type.closeContainerCommand);
+    reports.add(builder.build());
+
+    builder.setMsg("Not enough space")
+        .setCmdId(HddsIdFactory.getLongId())
+        .setStatus(CommandStatus.Status.FAILED)
+        .setType(Type.replicateContainerCommand);
+    reports.add(builder.build());
+    return reports;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a0f807/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java
new file mode 100644
index 0000000..f529c20
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.hdds.scm.command;
\ No newline at end of file




[38/50] hadoop git commit: HDDS-262. Send SCM healthy and failed volumes in the heartbeat. Contributed by Bharat Viswanadham.

Posted by in...@apache.org.
HDDS-262. Send SCM healthy and failed volumes in the heartbeat. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16f9aee5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16f9aee5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16f9aee5

Branch: refs/heads/HADOOP-15461
Commit: 16f9aee5f55bc37c1bb243708ee9b3f97e5a5b83
Parents: 2ced3ef
Author: Nanda kumar <na...@apache.org>
Authored: Tue Jul 24 12:09:15 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Tue Jul 24 12:09:15 2018 +0530

----------------------------------------------------------------------
 .../container/common/volume/HddsVolume.java     | 81 ++++++++++++++------
 .../container/common/volume/VolumeSet.java      | 28 +++++--
 .../container/common/volume/TestVolumeSet.java  | 35 ++++++++-
 3 files changed, 111 insertions(+), 33 deletions(-)
----------------------------------------------------------------------
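In brief: a storage location that cannot be initialized is now kept as a stub HddsVolume built with failedVolume(true), and the node report sent in the heartbeat includes it as a failed volume with capacity, remaining and SCM-used all set to 0, instead of dropping it. A rough usage sketch modelled on the new testFailVolumes() below; the wrapper class and directory paths are illustrative (the first path is assumed to be unwritable, the second writable):

    import java.util.UUID;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
    import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;

    /** Illustration only: one failed location plus one healthy one. */
    public final class FailedVolumeSketch {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.set(HDDS_DATANODE_DIR_KEY, "/read/only/dir,/data/hdds");
        VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
        // The unusable location is recorded rather than lost ...
        assert volumeSet.getFailedVolumesList().size() == 1;
        // ... and getNodeReport() now sizes its StorageLocationReport array as
        // volumeMap.size() + failedVolumeMap.size(), marking the failed entry
        // with failed=true and zeroed space figures.
        volumeSet.shutdown();
      }
    }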


http://git-wip-us.apache.org/repos/asf/hadoop/blob/16f9aee5/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 0cbfd9f..6b90146 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -37,6 +37,7 @@ import org.slf4j.LoggerFactory;
 import java.io.File;
 import java.io.IOException;
 import java.util.Properties;
+import java.util.UUID;
 
 /**
  * HddsVolume represents volume in a datanode. {@link VolumeSet} maintains a
@@ -84,6 +85,7 @@ public final class HddsVolume {
 
     private String datanodeUuid;
     private String clusterID;
+    private boolean failedVolume = false;
 
     public Builder(String rootDirStr) {
       this.volumeRootStr = rootDirStr;
@@ -114,29 +116,47 @@ public final class HddsVolume {
       return this;
     }
 
+    // This is only used to build stub HddsVolume objects for failed volumes,
+    // i.e. when an exception is thrown while constructing a regular HddsVolume,
+    // the caller creates a placeholder with failedVolume(true) instead.
+    public Builder failedVolume(boolean failed) {
+      this.failedVolume = failed;
+      return this;
+    }
+
     public HddsVolume build() throws IOException {
       return new HddsVolume(this);
     }
   }
 
   private HddsVolume(Builder b) throws IOException {
-    StorageLocation location = StorageLocation.parse(b.volumeRootStr);
-    hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR);
-    this.state = VolumeState.NOT_INITIALIZED;
-    this.clusterID = b.clusterID;
-    this.datanodeUuid = b.datanodeUuid;
-    this.volumeIOStats = new VolumeIOStats();
-
-    VolumeInfo.Builder volumeBuilder =
-        new VolumeInfo.Builder(b.volumeRootStr, b.conf)
-        .storageType(b.storageType)
-        .configuredCapacity(b.configuredCapacity);
-    this.volumeInfo = volumeBuilder.build();
-
-    LOG.info("Creating Volume: " + this.hddsRootDir + " of  storage type : " +
-        b.storageType + " and capacity : " + volumeInfo.getCapacity());
-
-    initialize();
+    if (!b.failedVolume) {
+      StorageLocation location = StorageLocation.parse(b.volumeRootStr);
+      hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR);
+      this.state = VolumeState.NOT_INITIALIZED;
+      this.clusterID = b.clusterID;
+      this.datanodeUuid = b.datanodeUuid;
+      this.volumeIOStats = new VolumeIOStats();
+
+      VolumeInfo.Builder volumeBuilder =
+          new VolumeInfo.Builder(b.volumeRootStr, b.conf)
+              .storageType(b.storageType)
+              .configuredCapacity(b.configuredCapacity);
+      this.volumeInfo = volumeBuilder.build();
+
+      LOG.info("Creating Volume: " + this.hddsRootDir + " of  storage type : " +
+          b.storageType + " and capacity : " + volumeInfo.getCapacity());
+
+      initialize();
+    } else {
+      // Builder is called with failedVolume set, so create a failed volume
+      // HddsVolumeObject.
+      hddsRootDir = new File(b.volumeRootStr);
+      volumeIOStats = null;
+      volumeInfo = null;
+      storageID = UUID.randomUUID().toString();
+      state = VolumeState.FAILED;
+    }
   }
 
   public VolumeInfo getVolumeInfo() {
@@ -285,7 +305,10 @@ public final class HddsVolume {
   }
 
   public StorageType getStorageType() {
-    return volumeInfo.getStorageType();
+    if(volumeInfo != null) {
+      return volumeInfo.getStorageType();
+    }
+    return StorageType.DEFAULT;
   }
 
   public String getStorageID() {
@@ -313,11 +336,17 @@ public final class HddsVolume {
   }
 
   public long getCapacity() throws IOException {
-    return volumeInfo.getCapacity();
+    if(volumeInfo != null) {
+      return volumeInfo.getCapacity();
+    }
+    return 0;
   }
 
   public long getAvailable() throws IOException {
-    return volumeInfo.getAvailable();
+    if(volumeInfo != null) {
+      return volumeInfo.getAvailable();
+    }
+    return 0;
   }
 
   public void setState(VolumeState state) {
@@ -334,12 +363,16 @@ public final class HddsVolume {
 
   public void failVolume() {
     setState(VolumeState.FAILED);
-    volumeInfo.shutdownUsageThread();
+    if (volumeInfo != null) {
+      volumeInfo.shutdownUsageThread();
+    }
   }
 
   public void shutdown() {
     this.state = VolumeState.NON_EXISTENT;
-    volumeInfo.shutdownUsageThread();
+    if (volumeInfo != null) {
+      volumeInfo.shutdownUsageThread();
+    }
   }
 
   /**
@@ -368,6 +401,8 @@ public final class HddsVolume {
    */
   @VisibleForTesting
   public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
-    volumeInfo.setScmUsageForTesting(scmUsageForTest);
+    if (volumeInfo != null) {
+      volumeInfo.setScmUsageForTesting(scmUsageForTest);
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16f9aee5/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
index 4dfde37..4a1487b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
@@ -76,6 +76,7 @@ public class VolumeSet {
    * mutually exclusive.
    */
   private Map<String, HddsVolume> failedVolumeMap;
+
   /**
    * {@link VolumeSet#volumeStateMap} maintains a list of active volumes per
    * StorageType.
@@ -95,12 +96,12 @@ public class VolumeSet {
   private Runnable shutdownHook;
 
   public VolumeSet(String dnUuid, Configuration conf)
-      throws DiskOutOfSpaceException {
+      throws IOException {
     this(dnUuid, null, conf);
   }
 
   public VolumeSet(String dnUuid, String clusterID, Configuration conf)
-      throws DiskOutOfSpaceException {
+      throws IOException {
     this.datanodeUuid = dnUuid;
     this.clusterID = clusterID;
     this.conf = conf;
@@ -120,7 +121,7 @@ public class VolumeSet {
   }
 
   // Add DN volumes configured through ConfigKeys to volumeMap.
-  private void initializeVolumeSet() throws DiskOutOfSpaceException {
+  private void initializeVolumeSet() throws IOException {
     volumeMap = new ConcurrentHashMap<>();
     failedVolumeMap = new ConcurrentHashMap<>();
     volumeStateMap = new EnumMap<>(StorageType.class);
@@ -153,6 +154,9 @@ public class VolumeSet {
         LOG.info("Added Volume : {} to VolumeSet",
             hddsVolume.getHddsRootDir().getPath());
       } catch (IOException e) {
+        HddsVolume volume = new HddsVolume.Builder(locationString)
+            .failedVolume(true).build();
+        failedVolumeMap.put(locationString, volume);
         LOG.error("Failed to parse the storage location: " + locationString, e);
       }
     }
@@ -337,11 +341,12 @@ public class VolumeSet {
   public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport()
       throws IOException {
     boolean failed;
-    StorageLocationReport[] reports =
-        new StorageLocationReport[volumeMap.size()];
+    StorageLocationReport[] reports = new StorageLocationReport[volumeMap
+        .size() + failedVolumeMap.size()];
     int counter = 0;
+    HddsVolume hddsVolume;
     for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
-      HddsVolume hddsVolume = entry.getValue();
+      hddsVolume = entry.getValue();
       VolumeInfo volumeInfo = hddsVolume.getVolumeInfo();
       long scmUsed = 0;
       long remaining = 0;
@@ -370,6 +375,17 @@ public class VolumeSet {
       StorageLocationReport r = builder.build();
       reports[counter++] = r;
     }
+    for (Map.Entry<String, HddsVolume> entry : failedVolumeMap.entrySet()) {
+      hddsVolume = entry.getValue();
+      StorageLocationReport.Builder builder = StorageLocationReport
+          .newBuilder();
+      builder.setStorageLocation(hddsVolume.getHddsRootDir()
+          .getAbsolutePath()).setId(hddsVolume.getStorageID()).setFailed(true)
+          .setCapacity(0).setRemaining(0).setScmUsed(0).setStorageType(
+              hddsVolume.getStorageType());
+      StorageLocationReport r = builder.build();
+      reports[counter++] = r;
+    }
     NodeReportProto.Builder nrb = NodeReportProto.newBuilder();
     for (int i = 0; i < reports.length; i++) {
       nrb.addStorageReport(reports[i].getProtoBufMessage());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16f9aee5/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index 3ee9343..fca68b1 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -27,8 +27,10 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.apache.hadoop.ozone.container.common.volume.HddsVolume
     .HDDS_VOLUME_DIR;
 import static org.junit.Assert.assertEquals;
@@ -82,14 +84,16 @@ public class TestVolumeSet {
   @After
   public void shutdown() throws IOException {
     // Delete the hdds volume root dir
-    List<HddsVolume> volumes = new ArrayList<>();
-    volumes.addAll(volumeSet.getVolumesList());
-    volumes.addAll(volumeSet.getFailedVolumesList());
+    List<HddsVolume> hddsVolumes = new ArrayList<>();
+    hddsVolumes.addAll(volumeSet.getVolumesList());
+    hddsVolumes.addAll(volumeSet.getFailedVolumesList());
 
-    for (HddsVolume volume : volumes) {
+    for (HddsVolume volume : hddsVolumes) {
       FileUtils.deleteDirectory(volume.getHddsRootDir());
     }
     volumeSet.shutdown();
+
+    FileUtil.fullyDelete(new File(baseDir));
   }
 
   private boolean checkVolumeExistsInVolumeSet(String volume) {
@@ -222,6 +226,29 @@ public class TestVolumeSet {
         // Do Nothing. Exception is expected.
       }
     }
+  }
+
+  @Test
+  public void testFailVolumes() throws  Exception{
+    VolumeSet volSet = null;
+    File readOnlyVolumePath = new File(baseDir);
+    //Set to readonly, so that this volume will be failed
+    readOnlyVolumePath.setReadOnly();
+    File volumePath = GenericTestUtils.getRandomizedTestDir();
+    OzoneConfiguration ozoneConfig = new OzoneConfiguration();
+    ozoneConfig.set(HDDS_DATANODE_DIR_KEY, readOnlyVolumePath.getAbsolutePath()
+        + "," + volumePath.getAbsolutePath());
+    volSet = new VolumeSet(UUID.randomUUID().toString(), ozoneConfig);
+    assertTrue(volSet.getFailedVolumesList().size() == 1);
+    assertEquals(readOnlyVolumePath, volSet.getFailedVolumesList().get(0)
+        .getHddsRootDir());
+
+    //Set back to writable
+    try {
+      readOnlyVolumePath.setWritable(true);
+    } finally {
+      FileUtil.fullyDelete(volumePath);
+    }
 
   }
 }




[13/50] hadoop git commit: HDFS-13743. RBF: Router throws NullPointerException due to the invalid initialization of MountTableResolver. Contributed by Takanobu Asanuma.

Posted by in...@apache.org.
HDFS-13743. RBF: Router throws NullPointerException due to the invalid initialization of MountTableResolver. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b25fb94
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b25fb94
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b25fb94

Branch: refs/heads/HADOOP-15461
Commit: 7b25fb949bf6f02df997beeca7df46c9e84c8d96
Parents: e6873df
Author: Yiqun Lin <yq...@apache.org>
Authored: Fri Jul 20 17:28:57 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Fri Jul 20 17:28:57 2018 +0800

----------------------------------------------------------------------
 .../federation/resolver/MountTableResolver.java | 28 +++++--
 .../TestInitializeMountTableResolver.java       | 82 ++++++++++++++++++++
 2 files changed, 102 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
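In brief, initDefaultNameService() no longer depends on catching HadoopIllegalArgumentException; it resolves the default namespace in a fixed order and falls back to an empty string (with a warning) when nothing is configured, so the Router no longer fails while initializing the MountTableResolver. A short sketch of the resulting fallback chain, mirroring the new unit tests; the wrapper class is invented for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
    import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;

    /** Illustration only: how the default namespace is now chosen. */
    public final class DefaultNamespaceSketch {
      public static void main(String[] args) {
        // Resolution order after this change:
        //   1. DFS_ROUTER_DEFAULT_NAMESERVICE, if set
        //   2. the local nameservice id (DFS_NAMESERVICE_ID), if set
        //   3. the first entry of DFS_NAMESERVICES, if any
        //   4. otherwise "" (logged as a warning instead of failing)
        Configuration conf = new Configuration();
        conf.set(DFS_NAMESERVICES, "ns1,ns2");  // no router default, no nameservice id
        MountTableResolver resolver = new MountTableResolver(conf);
        assert "ns1".equals(resolver.getDefaultNamespace());

        // With nothing configured at all, the default namespace is simply empty.
        assert "".equals(new MountTableResolver(new Configuration())
            .getDefaultNamespace());
      }
    }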


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b25fb94/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index 3f6efd6..c264de3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.federation.resolver;
 
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT;
@@ -42,7 +44,6 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -149,14 +150,25 @@ public class MountTableResolver
    * @param conf Configuration for this resolver.
    */
   private void initDefaultNameService(Configuration conf) {
-    try {
-      this.defaultNameService = conf.get(
-          DFS_ROUTER_DEFAULT_NAMESERVICE,
-          DFSUtil.getNamenodeNameServiceId(conf));
-    } catch (HadoopIllegalArgumentException e) {
-      LOG.error("Cannot find default name service, setting it to the first");
+    this.defaultNameService = conf.get(
+        DFS_ROUTER_DEFAULT_NAMESERVICE,
+        DFSUtil.getNamenodeNameServiceId(conf));
+
+    if (defaultNameService == null) {
+      LOG.warn(
+          "{} and {} is not set. Fallback to {} as the default name service.",
+          DFS_ROUTER_DEFAULT_NAMESERVICE, DFS_NAMESERVICE_ID, DFS_NAMESERVICES);
       Collection<String> nsIds = DFSUtilClient.getNameServiceIds(conf);
-      this.defaultNameService = nsIds.iterator().next();
+      if (nsIds.isEmpty()) {
+        this.defaultNameService = "";
+      } else {
+        this.defaultNameService = nsIds.iterator().next();
+      }
+    }
+
+    if (this.defaultNameService.equals("")) {
+      LOG.warn("Default name service is not set.");
+    } else {
       LOG.info("Default name service: {}", this.defaultNameService);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b25fb94/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java
new file mode 100644
index 0000000..5db7531
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.resolver;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test {@link MountTableResolver} initialization.
+ */
+public class TestInitializeMountTableResolver {
+
+  @Test
+  public void testDefaultNameserviceIsMissing() {
+    Configuration conf = new Configuration();
+    MountTableResolver mountTable = new MountTableResolver(conf);
+    assertEquals("", mountTable.getDefaultNamespace());
+  }
+
+  @Test
+  public void testDefaultNameserviceWithEmptyString() {
+    Configuration conf = new Configuration();
+    conf.set(DFS_ROUTER_DEFAULT_NAMESERVICE, "");
+    MountTableResolver mountTable = new MountTableResolver(conf);
+    assertEquals("", mountTable.getDefaultNamespace());
+  }
+
+  @Test
+  public void testRouterDefaultNameservice() {
+    Configuration conf = new Configuration();
+    conf.set(DFS_ROUTER_DEFAULT_NAMESERVICE, "router_ns"); // this is priority
+    conf.set(DFS_NAMESERVICE_ID, "ns_id");
+    conf.set(DFS_NAMESERVICES, "nss");
+    MountTableResolver mountTable = new MountTableResolver(conf);
+    assertEquals("router_ns", mountTable.getDefaultNamespace());
+  }
+
+  @Test
+  public void testNameserviceID() {
+    Configuration conf = new Configuration();
+    conf.set(DFS_NAMESERVICE_ID, "ns_id"); // this is priority
+    conf.set(DFS_NAMESERVICES, "nss");
+    MountTableResolver mountTable = new MountTableResolver(conf);
+    assertEquals("ns_id", mountTable.getDefaultNamespace());
+  }
+
+  @Test
+  public void testSingleNameservices() {
+    Configuration conf = new Configuration();
+    conf.set(DFS_NAMESERVICES, "ns1");
+    MountTableResolver mountTable = new MountTableResolver(conf);
+    assertEquals("ns1", mountTable.getDefaultNamespace());
+  }
+
+  @Test
+  public void testMultipleNameservices() {
+    Configuration conf = new Configuration();
+    conf.set(DFS_NAMESERVICES, "ns1,ns2");
+    MountTableResolver mountTable = new MountTableResolver(conf);
+    assertEquals("ns1", mountTable.getDefaultNamespace());
+  }
+}
\ No newline at end of file




[31/50] hadoop git commit: YARN-8360. Improve YARN service restart policy and node manager auto restart policy. Contributed by Suma Shivaprasad

Posted by in...@apache.org.
YARN-8360. Improve YARN service restart policy and node manager auto restart policy.
           Contributed by Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84d7bf1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84d7bf1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84d7bf1e

Branch: refs/heads/HADOOP-15461
Commit: 84d7bf1eeff6b9418361afa4aa713e5e6f771365
Parents: bbe2f62
Author: Eric Yang <ey...@apache.org>
Authored: Mon Jul 23 12:57:01 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Mon Jul 23 12:57:01 2018 -0400

----------------------------------------------------------------------
 .../service/component/AlwaysRestartPolicy.java  |  5 ++
 .../component/ComponentRestartPolicy.java       |  2 +
 .../service/component/NeverRestartPolicy.java   |  5 ++
 .../component/OnFailureRestartPolicy.java       |  5 ++
 .../provider/AbstractProviderService.java       | 29 +++++----
 .../hadoop/yarn/service/ServiceTestUtils.java   |  2 +-
 .../containerlaunch/TestAbstractLauncher.java   | 66 ++++++++++++++++++++
 7 files changed, 101 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
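In brief, the provider now asks the component's restart policy whether node-manager container retries are allowed before building a retry context, so components declared with a NEVER restart policy no longer get automatic container restarts from the NM. A simplified sketch of the new gate in buildContainerRetry(); the wrapper class, the three numeric parameters and the import locations are best-effort assumptions added for readability, with the configuration lookups collapsed:

    import org.apache.hadoop.yarn.service.component.ComponentRestartPolicy;
    import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
    import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;

    /** Illustration only: the retry gate added in this patch. */
    final class RetryGateSketch {
      static void buildContainerRetry(AbstractLauncher launcher,
          ComponentInstance instance, int maxRetries, int retryInterval,
          long failuresValidityInterval) {
        ComponentRestartPolicy restartPolicy =
            instance.getComponent().getRestartPolicyHandler();
        // ALWAYS and ON_FAILURE policies return true here; NEVER returns false,
        // so such components are launched without any NM auto-retry context.
        if (restartPolicy.allowContainerRetriesForInstance(instance)) {
          launcher.setRetryContext(maxRetries, retryInterval,
              failuresValidityInterval);
        }
      }
    }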


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d7bf1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java
index 704ab14..505120d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java
@@ -79,4 +79,9 @@ public final class AlwaysRestartPolicy implements ComponentRestartPolicy {
   @Override public boolean shouldTerminate(Component component) {
     return false;
   }
+
+  @Override public boolean allowContainerRetriesForInstance(
+      ComponentInstance componentInstance) {
+    return true;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d7bf1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java
index 23b0fb9..c5adffe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java
@@ -42,4 +42,6 @@ public interface ComponentRestartPolicy {
 
   boolean shouldTerminate(Component component);
 
+  boolean allowContainerRetriesForInstance(ComponentInstance componentInstance);
+
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d7bf1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java
index ace1f89..cd44a58 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java
@@ -79,4 +79,9 @@ public final class NeverRestartPolicy implements ComponentRestartPolicy {
     }
     return true;
   }
+
+  @Override public boolean allowContainerRetriesForInstance(
+      ComponentInstance componentInstance) {
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d7bf1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/OnFailureRestartPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/OnFailureRestartPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/OnFailureRestartPolicy.java
index 39fba2a..b939ba0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/OnFailureRestartPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/OnFailureRestartPolicy.java
@@ -84,4 +84,9 @@ public final class OnFailureRestartPolicy implements ComponentRestartPolicy {
     }
     return true;
   }
+
+  @Override public boolean allowContainerRetriesForInstance(
+      ComponentInstance componentInstance) {
+    return true;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d7bf1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
index 9c71e66..3dfdadc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.component.ComponentRestartPolicy;
 import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
 import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
 import org.apache.hadoop.yarn.service.containerlaunch.ContainerLaunchService;
@@ -116,18 +117,22 @@ public abstract class AbstractProviderService implements ProviderService,
 
   public void buildContainerRetry(AbstractLauncher launcher,
       Configuration yarnConf,
-      ContainerLaunchService.ComponentLaunchContext compLaunchContext) {
+      ContainerLaunchService.ComponentLaunchContext compLaunchContext,
+      ComponentInstance instance) {
     // By default retry forever every 30 seconds
-    launcher.setRetryContext(
-        YarnServiceConf.getInt(CONTAINER_RETRY_MAX,
-            DEFAULT_CONTAINER_RETRY_MAX,
-            compLaunchContext.getConfiguration(), yarnConf),
-        YarnServiceConf.getInt(CONTAINER_RETRY_INTERVAL,
-            DEFAULT_CONTAINER_RETRY_INTERVAL,
-            compLaunchContext.getConfiguration(), yarnConf),
-        YarnServiceConf.getLong(CONTAINER_FAILURES_VALIDITY_INTERVAL,
-            DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL,
-            compLaunchContext.getConfiguration(), yarnConf));
+
+    ComponentRestartPolicy restartPolicy = instance.getComponent()
+        .getRestartPolicyHandler();
+    if (restartPolicy.allowContainerRetriesForInstance(instance)) {
+      launcher.setRetryContext(YarnServiceConf
+          .getInt(CONTAINER_RETRY_MAX, DEFAULT_CONTAINER_RETRY_MAX,
+              compLaunchContext.getConfiguration(), yarnConf), YarnServiceConf
+          .getInt(CONTAINER_RETRY_INTERVAL, DEFAULT_CONTAINER_RETRY_INTERVAL,
+              compLaunchContext.getConfiguration(), yarnConf), YarnServiceConf
+          .getLong(CONTAINER_FAILURES_VALIDITY_INTERVAL,
+              DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL,
+              compLaunchContext.getConfiguration(), yarnConf));
+    }
   }
 
   public void buildContainerLaunchContext(AbstractLauncher launcher,
@@ -161,6 +166,6 @@ public abstract class AbstractProviderService implements ProviderService,
         yarnConf, container, compLaunchContext, tokensForSubstitution);
 
     // Setup container retry settings
-    buildContainerRetry(launcher, yarnConf, compLaunchContext);
+    buildContainerRetry(launcher, yarnConf, compLaunchContext, instance);
   }
 }
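
The net effect of the hunk above: setRetryContext() is now reachable only when the
component's restart policy opts in for that particular instance. A hypothetical helper that
captures just this decision (the class and method names below are illustrative and not part
of the patch; the APIs called are the ones used above):

    import org.apache.hadoop.yarn.service.component.ComponentRestartPolicy;
    import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
    import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;

    final class RetryGateSketch {
      private RetryGateSketch() {
      }

      /** Configure NM-side container retries only when the policy allows them. */
      static void maybeSetRetries(AbstractLauncher launcher,
          ComponentInstance instance, int maxRetries, int retryInterval,
          long failuresValidityInterval) {
        ComponentRestartPolicy policy =
            instance.getComponent().getRestartPolicyHandler();
        if (policy.allowContainerRetriesForInstance(instance)) {
          // Mirrors buildContainerRetry(): a NEVER-style policy skips this call,
          // leaving the launcher untouched (see TestAbstractLauncher below).
          launcher.setRetryContext(maxRetries, retryInterval,
              failuresValidityInterval);
        }
      }
    }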

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d7bf1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
index 3d1412d..170c20b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
@@ -115,7 +115,7 @@ public class ServiceTestUtils {
     exampleApp.setName(serviceName);
     exampleApp.setVersion("v1");
     exampleApp.addComponent(
-        createComponent("terminating-comp1", 2, "sleep " + "1000",
+        createComponent("terminating-comp1", 2, "sleep 1000",
             Component.RestartPolicyEnum.NEVER, null));
     exampleApp.addComponent(
         createComponent("terminating-comp2", 2, "sleep 1000",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d7bf1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/containerlaunch/TestAbstractLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/containerlaunch/TestAbstractLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/containerlaunch/TestAbstractLauncher.java
index f4f1a50..108078c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/containerlaunch/TestAbstractLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/containerlaunch/TestAbstractLauncher.java
@@ -19,13 +19,33 @@
 package org.apache.hadoop.yarn.service.containerlaunch;
 
 import org.apache.hadoop.yarn.service.ServiceContext;
+import org.apache.hadoop.yarn.service.api.records.Configuration;
+import org.apache.hadoop.yarn.service.component.AlwaysRestartPolicy;
+import org.apache.hadoop.yarn.service.component.Component;
+import org.apache.hadoop.yarn.service.component.NeverRestartPolicy;
+import org.apache.hadoop.yarn.service.component.OnFailureRestartPolicy;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.apache.hadoop.yarn.service.provider.defaultImpl
+    .DefaultProviderService;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
 
+import static org.apache.hadoop.fi.FiConfig.getConfig;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf
+    .DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf
+    .DEFAULT_CONTAINER_RETRY_INTERVAL;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf
+    .DEFAULT_CONTAINER_RETRY_MAX;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 /**
  * Tests for {@link AbstractLauncher}.
@@ -51,4 +71,50 @@ public class TestAbstractLauncher {
 
     Assert.assertEquals("s1:t1:ro,s2:t2:ro", dockerContainerMounts);
   }
+
+  @Test
+  public void testContainerRetries() throws Exception {
+
+    DefaultProviderService providerService = new DefaultProviderService();
+    AbstractLauncher mockLauncher = mock(AbstractLauncher.class);
+    ContainerLaunchService.ComponentLaunchContext componentLaunchContext =
+        mock(ContainerLaunchService.ComponentLaunchContext.class);
+
+    ComponentInstance componentInstance = mock(ComponentInstance.class);
+
+    //Never Restart Policy
+    Component component = mock(Component.class);
+    when(componentInstance.getComponent()).thenReturn(component);
+
+    when(component.getRestartPolicyHandler()).thenReturn(NeverRestartPolicy
+        .getInstance());
+
+    providerService.buildContainerRetry(mockLauncher, getConfig(),
+        componentLaunchContext, componentInstance);
+    verifyZeroInteractions(mockLauncher);
+
+
+    //OnFailure restart policy
+    when(component.getRestartPolicyHandler()).thenReturn(OnFailureRestartPolicy
+        .getInstance());
+    when(componentLaunchContext.getConfiguration()).thenReturn(new
+        Configuration());
+    providerService.buildContainerRetry(mockLauncher, getConfig(),
+        componentLaunchContext, componentInstance);
+    verify(mockLauncher).setRetryContext(DEFAULT_CONTAINER_RETRY_MAX,
+        DEFAULT_CONTAINER_RETRY_INTERVAL,
+        DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL);
+
+    reset(mockLauncher);
+
+    //Always restart policy
+    when(component.getRestartPolicyHandler()).thenReturn(AlwaysRestartPolicy
+        .getInstance());
+    providerService.buildContainerRetry(mockLauncher, getConfig(),
+        componentLaunchContext, componentInstance);
+
+    verify(mockLauncher).setRetryContext(DEFAULT_CONTAINER_RETRY_MAX,
+        DEFAULT_CONTAINER_RETRY_INTERVAL,
+        DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL);
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[25/50] hadoop git commit: HDDS-239. Add PipelineStateManager to track pipeline state transition. Contributed by Mukul Kumar Singh.

Posted by in...@apache.org.
HDDS-239. Add PipelineStateManager to track pipeline state transition. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9be25e34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9be25e34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9be25e34

Branch: refs/heads/HADOOP-15461
Commit: 9be25e347683d26e0575458c7f470c76fd4d951b
Parents: d2acf8d
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri Jul 20 14:22:02 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Fri Jul 20 14:22:02 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   5 +
 .../scm/container/common/helpers/Pipeline.java  |   7 +
 .../common/src/main/resources/ozone-default.xml |  12 ++
 .../hdds/scm/container/ContainerMapping.java    |   4 +
 .../hdds/scm/exceptions/SCMException.java       |   1 +
 .../hdds/scm/pipelines/PipelineManager.java     |  64 +++---
 .../hdds/scm/pipelines/PipelineSelector.java    | 212 ++++++++++++++++---
 .../scm/pipelines/ratis/RatisManagerImpl.java   |  33 +--
 .../standalone/StandaloneManagerImpl.java       |  21 +-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |  14 ++
 10 files changed, 273 insertions(+), 100 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be25e34/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 71184cf..6e940ad 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -236,6 +236,11 @@ public final class ScmConfigKeys {
   public static final String
       OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
 
+  public static final String OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT =
+      "ozone.scm.pipeline.creation.lease.timeout";
+
+  public static final String
+      OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
 
   public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
       "ozone.scm.block.deletion.max.retry";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be25e34/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
index c5794f4..534c9fd 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
@@ -214,6 +214,13 @@ public class Pipeline {
   }
 
   /**
+   * Update the State of the pipeline.
+   */
+  public void setLifeCycleState(HddsProtos.LifeCycleState nextState) {
+     lifeCycleState = nextState;
+  }
+
+  /**
    * Gets the pipeline Name.
    *
    * @return - Name of the pipeline

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be25e34/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 5a1d26a..69a382a 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1085,5 +1085,17 @@
       executed since last report. Unit could be defined with
       postfix (ns,ms,s,m,h,d)</description>
   </property>
+  <property>
+    <name>ozone.scm.pipeline.creation.lease.timeout</name>
+    <value>60s</value>
+    <tag>OZONE, SCM, PIPELINE</tag>
+    <description>
+      Pipeline creation timeout used by SCM (the value may carry a time unit
+      suffix such as ms, s, m or h). When the CREATE event happens the pipeline
+      moves from the ALLOCATED to the CREATING state; SCM then waits the
+      configured amount of time for the CREATED event, and if it is not
+      received the pipeline is timed out and moved to CLOSED.
+    </description>
+  </property>
 
 </configuration>
\ No newline at end of file
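
The new property is the knob behind the lease that PipelineSelector (further down in this
commit) takes while a pipeline sits in the CREATING state. A rough lifecycle sketch using
only calls that appear in this patch; the demo method, its arguments and the timeout value
are placeholders, not real SCM code:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
    import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
    import org.apache.hadoop.ozone.lease.Lease;
    import org.apache.hadoop.ozone.lease.LeaseException;
    import org.apache.hadoop.ozone.lease.LeaseManager;

    final class PipelineLeaseSketch {
      static void demo(PipelineSelector selector, Pipeline pipeline,
          long timeoutMillis) throws LeaseException {
        LeaseManager<Pipeline> leaseManager = new LeaseManager<>(timeoutMillis);
        leaseManager.start();

        // CREATE: take a lease so a pipeline stuck in CREATING is timed out.
        Lease<Pipeline> lease = leaseManager.acquire(pipeline);
        lease.registerCallBack(() -> {
          // Fired by the lease manager if the timeout expires first.
          selector.updatePipelineState(pipeline,
              HddsProtos.LifeCycleEvent.TIMEOUT);
          return null;
        });

        // CREATED: releasing the lease prevents the TIMEOUT callback.
        leaseManager.release(pipeline);

        // On shutdown (PipelineSelector#shutdown, called from
        // ContainerMapping#close in this patch).
        leaseManager.shutdown();
      }
    }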

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be25e34/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index 26f4d86..f07d22b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -658,6 +658,10 @@ public class ContainerMapping implements Mapping {
     if (containerStore != null) {
       containerStore.close();
     }
+
+    if (pipelineSelector != null) {
+      pipelineSelector.shutdown();
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be25e34/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
index d7d70ef..0085542 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
@@ -107,6 +107,7 @@ public class SCMException extends IOException {
     FAILED_TO_LOAD_OPEN_CONTAINER,
     FAILED_TO_ALLOCATE_CONTAINER,
     FAILED_TO_CHANGE_CONTAINER_STATE,
+    FAILED_TO_CHANGE_PIPELINE_STATE,
     CONTAINER_EXISTS,
     FAILED_TO_FIND_CONTAINER,
     FAILED_TO_FIND_CONTAINER_WITH_SPACE,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be25e34/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
index a041973..77d8211 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
@@ -59,41 +59,16 @@ public abstract class PipelineManager {
    * @return a Pipeline.
    */
   public synchronized final Pipeline getPipeline(
-      ReplicationFactor replicationFactor, ReplicationType replicationType)
-      throws IOException {
-    /**
-     * In the Ozone world, we have a very simple policy.
-     *
-     * 1. Try to create a pipeline if there are enough free nodes.
-     *
-     * 2. This allows all nodes to part of a pipeline quickly.
-     *
-     * 3. if there are not enough free nodes, return pipeline in a
-     * round-robin fashion.
-     *
-     * TODO: Might have to come up with a better algorithm than this.
-     * Create a new placement policy that returns pipelines in round robin
-     * fashion.
-     */
-    Pipeline pipeline = allocatePipeline(replicationFactor);
+      ReplicationFactor replicationFactor, ReplicationType replicationType) {
+    Pipeline pipeline = findOpenPipeline(replicationType, replicationFactor);
     if (pipeline != null) {
-      LOG.debug("created new pipeline:{} for container with " +
+      LOG.debug("re-used pipeline:{} for container with " +
               "replicationType:{} replicationFactor:{}",
           pipeline.getPipelineName(), replicationType, replicationFactor);
-      activePipelines.add(pipeline);
-      activePipelineMap.put(pipeline.getPipelineName(), pipeline);
-      node2PipelineMap.addPipeline(pipeline);
-    } else {
-      pipeline = findOpenPipeline(replicationType, replicationFactor);
-      if (pipeline != null) {
-        LOG.debug("re-used pipeline:{} for container with " +
-                "replicationType:{} replicationFactor:{}",
-            pipeline.getPipelineName(), replicationType, replicationFactor);
-      }
     }
     if (pipeline == null) {
       LOG.error("Get pipeline call failed. We are not able to find" +
-              "free nodes or operational pipeline.");
+              " operational pipeline.");
       return null;
     } else {
       return pipeline;
@@ -109,7 +84,7 @@ public abstract class PipelineManager {
   public synchronized final Pipeline getPipeline(String pipelineName) {
     Pipeline pipeline = null;
 
-    // 1. Check if pipeline channel already exists
+    // 1. Check if pipeline already exists
     if (activePipelineMap.containsKey(pipelineName)) {
       pipeline = activePipelineMap.get(pipelineName);
       LOG.debug("Returning pipeline for pipelineName:{}", pipelineName);
@@ -132,7 +107,13 @@ public abstract class PipelineManager {
   }
 
   public abstract Pipeline allocatePipeline(
-      ReplicationFactor replicationFactor) throws IOException;
+      ReplicationFactor replicationFactor);
+
+  /**
+   * Initialize the pipeline
+   * TODO: move the initialization to Ozone Client later
+   */
+  public abstract void initializePipeline(Pipeline pipeline) throws IOException;
 
   public void removePipeline(Pipeline pipeline) {
     activePipelines.remove(pipeline);
@@ -179,12 +160,23 @@ public abstract class PipelineManager {
   }
 
   /**
-   * Creates a pipeline from a specified set of Nodes.
-   * @param pipelineID - Name of the pipeline
-   * @param datanodes - The list of datanodes that make this pipeline.
+   * Creates a pipeline with a specified replication factor and type.
+   * @param replicationFactor - Replication Factor.
+   * @param replicationType - Replication Type.
    */
-  public abstract void createPipeline(String pipelineID,
-      List<DatanodeDetails> datanodes) throws IOException;
+  public Pipeline createPipeline(ReplicationFactor replicationFactor,
+      ReplicationType replicationType) throws IOException {
+    Pipeline pipeline = allocatePipeline(replicationFactor);
+    if (pipeline != null) {
+      LOG.debug("created new pipeline:{} for container with "
+              + "replicationType:{} replicationFactor:{}",
+          pipeline.getPipelineName(), replicationType, replicationFactor);
+      activePipelines.add(pipeline);
+      activePipelineMap.put(pipeline.getPipelineName(), pipeline);
+      node2PipelineMap.addPipeline(pipeline);
+    }
+    return pipeline;
+  }
 
   /**
    * Close the  pipeline with the given clusterId.
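
With this split, allocatePipeline() only constructs the Pipeline object, createPipeline()
registers it, and the create-versus-reuse decision moves up into PipelineSelector. The
resulting call order, condensed into a hypothetical helper (the wrapper class is
illustrative; the PipelineManager methods are the ones defined above):

    import java.io.IOException;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
    import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
    import org.apache.hadoop.hdds.scm.pipelines.PipelineManager;

    final class PipelineAllocationSketch {
      /** Create a fresh pipeline if free nodes exist, else reuse an open one. */
      static Pipeline getOrCreate(PipelineManager manager,
          ReplicationFactor factor, ReplicationType type) throws IOException {
        Pipeline pipeline = manager.createPipeline(factor, type);
        if (pipeline == null) {
          return manager.getPipeline(factor, type);   // round-robin reuse
        }
        // The real PipelineSelector also drives the CREATE/CREATED state
        // transitions and lease handling around this call.
        manager.initializePipeline(pipeline);
        return pipeline;
      }
    }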

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be25e34/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
index 2955af5..08710e7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .SCMContainerPlacementRandom;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipelines.ratis.RatisManagerImpl;
 import org.apache.hadoop.hdds.scm.pipelines.standalone.StandaloneManagerImpl;
@@ -33,17 +34,28 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.statemachine
+    .InvalidStateTransitionException;
+import org.apache.hadoop.ozone.common.statemachine.StateMachine;
+import org.apache.hadoop.ozone.lease.Lease;
+import org.apache.hadoop.ozone.lease.LeaseException;
+import org.apache.hadoop.ozone.lease.LeaseManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.UUID;
+import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
+    .FAILED_TO_CHANGE_PIPELINE_STATE;
+
 /**
  * Sends the request to the right pipeline manager.
  */
@@ -57,6 +69,10 @@ public class PipelineSelector {
   private final StandaloneManagerImpl standaloneManager;
   private final long containerSize;
   private final Node2PipelineMap node2PipelineMap;
+  private final LeaseManager<Pipeline> pipelineLeaseManager;
+  private final StateMachine<LifeCycleState,
+      HddsProtos.LifeCycleEvent> stateMachine;
+
   /**
    * Constructs a pipeline Selector.
    *
@@ -77,6 +93,74 @@ public class PipelineSelector {
     this.ratisManager =
         new RatisManagerImpl(this.nodeManager, placementPolicy, containerSize,
             conf, node2PipelineMap);
+    // Initialize the pipeline state machine.
+    Set<HddsProtos.LifeCycleState> finalStates = new HashSet();
+    long pipelineCreationLeaseTimeout = conf.getTimeDuration(
+        ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT,
+        ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT,
+        TimeUnit.MILLISECONDS);
+    LOG.trace("Starting Pipeline Lease Manager.");
+    pipelineLeaseManager = new LeaseManager<>(pipelineCreationLeaseTimeout);
+    pipelineLeaseManager.start();
+
+    // These are the steady states of a pipeline.
+    finalStates.add(HddsProtos.LifeCycleState.OPEN);
+    finalStates.add(HddsProtos.LifeCycleState.CLOSED);
+
+    this.stateMachine = new StateMachine<>(HddsProtos.LifeCycleState.ALLOCATED,
+        finalStates);
+    initializeStateMachine();
+  }
+
+  /**
+   * Event and State Transition Mapping:
+   *
+   * State: ALLOCATED ---------------> CREATING
+   * Event:                CREATE
+   *
+   * State: CREATING  ---------------> OPEN
+   * Event:               CREATED
+   *
+   * State: OPEN      ---------------> CLOSING
+   * Event:               FINALIZE
+   *
+   * State: CLOSING   ---------------> CLOSED
+   * Event:                CLOSE
+   *
+   * State: CREATING  ---------------> CLOSED
+   * Event:               TIMEOUT
+   *
+   *
+   * Pipeline State Flow:
+   *
+   * [ALLOCATED]---->[CREATING]------>[OPEN]-------->[CLOSING]
+   *            (CREATE)     | (CREATED)     (FINALIZE)   |
+   *                         |                            |
+   *                         |                            |
+   *                         |(TIMEOUT)                   |(CLOSE)
+   *                         |                            |
+   *                         +--------> [CLOSED] <--------+
+   */
+  private void initializeStateMachine() {
+    stateMachine.addTransition(HddsProtos.LifeCycleState.ALLOCATED,
+        HddsProtos.LifeCycleState.CREATING,
+        HddsProtos.LifeCycleEvent.CREATE);
+
+    stateMachine.addTransition(HddsProtos.LifeCycleState.CREATING,
+        HddsProtos.LifeCycleState.OPEN,
+        HddsProtos.LifeCycleEvent.CREATED);
+
+    stateMachine.addTransition(HddsProtos.LifeCycleState.OPEN,
+        HddsProtos.LifeCycleState.CLOSING,
+        HddsProtos.LifeCycleEvent.FINALIZE);
+
+    stateMachine.addTransition(HddsProtos.LifeCycleState.CLOSING,
+        HddsProtos.LifeCycleState.CLOSED,
+        HddsProtos.LifeCycleEvent.CLOSE);
+
+    stateMachine.addTransition(HddsProtos.LifeCycleState.CREATING,
+        HddsProtos.LifeCycleState.CLOSED,
+        HddsProtos.LifeCycleEvent.TIMEOUT);
   }
 
   /**
@@ -88,15 +172,14 @@ public class PipelineSelector {
    * @return pipeline corresponding to nodes
    */
   public static Pipeline newPipelineFromNodes(
-      List<DatanodeDetails> nodes, LifeCycleState state,
-      ReplicationType replicationType, ReplicationFactor replicationFactor,
-      String name) {
+      List<DatanodeDetails> nodes, ReplicationType replicationType,
+      ReplicationFactor replicationFactor, String name) {
     Preconditions.checkNotNull(nodes);
     Preconditions.checkArgument(nodes.size() > 0);
     String leaderId = nodes.get(0).getUuidString();
-    Pipeline
-        pipeline = new Pipeline(leaderId, state, replicationType,
-        replicationFactor, name);
+    // A new pipeline always starts in allocated state
+    Pipeline pipeline = new Pipeline(leaderId, LifeCycleState.ALLOCATED,
+        replicationType, replicationFactor, name);
     for (DatanodeDetails node : nodes) {
       pipeline.addMember(node);
     }
@@ -175,8 +258,35 @@ public class PipelineSelector {
     LOG.debug("Getting replication pipeline forReplicationType {} :" +
             " ReplicationFactor {}", replicationType.toString(),
         replicationFactor.toString());
-    return manager.
-        getPipeline(replicationFactor, replicationType);
+
+    /**
+     * In the Ozone world, we have a very simple policy.
+     *
+     * 1. Try to create a pipeline if there are enough free nodes.
+     *
+     * 2. This allows all nodes to be part of a pipeline quickly.
+     *
+     * 3. if there are not enough free nodes, return already allocated pipeline
+     * in a round-robin fashion.
+     *
+     * TODO: Might have to come up with a better algorithm than this.
+     * Create a new placement policy that returns pipelines in round robin
+     * fashion.
+     */
+    Pipeline pipeline =
+        manager.createPipeline(replicationFactor, replicationType);
+    if (pipeline == null) {
+      // try to return a pipeline from already allocated pipelines
+      pipeline = manager.getPipeline(replicationFactor, replicationType);
+    } else {
+      // if a new pipeline is created, initialize its state machine
+      updatePipelineState(pipeline,HddsProtos.LifeCycleEvent.CREATE);
+
+      //TODO: move the initialization of pipeline to Ozone Client
+      manager.initializePipeline(pipeline);
+      updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.CREATED);
+    }
+    return pipeline;
   }
 
   /**
@@ -194,19 +304,6 @@ public class PipelineSelector {
         " pipelineName:{}", replicationType, pipelineName);
     return manager.getPipeline(pipelineName);
   }
-  /**
-   * Creates a pipeline from a specified set of Nodes.
-   */
-
-  public void createPipeline(ReplicationType replicationType, String
-      pipelineID, List<DatanodeDetails> datanodes) throws IOException {
-    PipelineManager manager = getPipelineManager(replicationType);
-    Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
-    LOG.debug("Creating a pipeline: {} with nodes:{}", pipelineID,
-        datanodes.stream().map(DatanodeDetails::toString)
-            .collect(Collectors.joining(",")));
-    manager.createPipeline(pipelineID, datanodes);
-  }
 
   /**
    * Close the  pipeline with the given clusterId.
@@ -251,12 +348,77 @@ public class PipelineSelector {
   }
 
   public void removePipeline(UUID dnId) {
-    Set<Pipeline> pipelineChannelSet =
+    Set<Pipeline> pipelineSet =
         node2PipelineMap.getPipelines(dnId);
-    for (Pipeline pipelineChannel : pipelineChannelSet) {
-      getPipelineManager(pipelineChannel.getType())
-          .removePipeline(pipelineChannel);
+    for (Pipeline pipeline : pipelineSet) {
+      getPipelineManager(pipeline.getType())
+          .removePipeline(pipeline);
     }
     node2PipelineMap.removeDatanode(dnId);
   }
+
+  /**
+   * Update the Pipeline State to the next state.
+   *
+   * @param pipeline - Pipeline
+   * @param event - LifeCycle Event
+   * @throws SCMException  on Failure.
+   */
+  public void updatePipelineState(Pipeline pipeline,
+      HddsProtos.LifeCycleEvent event) throws IOException {
+    HddsProtos.LifeCycleState newState;
+    try {
+      newState = stateMachine.getNextState(pipeline.getLifeCycleState(), event);
+    } catch (InvalidStateTransitionException ex) {
+      String error = String.format("Failed to update pipeline state %s, " +
+              "reason: invalid state transition from state: %s upon " +
+              "event: %s.",
+          pipeline.getPipelineName(), pipeline.getLifeCycleState(), event);
+      LOG.error(error);
+      throw new SCMException(error, FAILED_TO_CHANGE_PIPELINE_STATE);
+    }
+
+    // This is a post condition after executing getNextState.
+    Preconditions.checkNotNull(newState);
+    Preconditions.checkNotNull(pipeline);
+    try {
+      switch (event) {
+      case CREATE:
+        // Acquire lease on pipeline
+        Lease<Pipeline> pipelineLease = pipelineLeaseManager.acquire(pipeline);
+        // Register callback to be executed in case of timeout
+        pipelineLease.registerCallBack(() -> {
+          updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.TIMEOUT);
+          return null;
+        });
+        break;
+      case CREATED:
+        // Release the lease on pipeline
+        pipelineLeaseManager.release(pipeline);
+        break;
+
+      case FINALIZE:
+        //TODO: cleanup pipeline by closing all the containers on the pipeline
+        break;
+
+      case CLOSE:
+      case TIMEOUT:
+        // TODO: Release the nodes here when pipelines are destroyed
+        break;
+      default:
+        throw new SCMException("Unsupported pipeline LifeCycleEvent.",
+            FAILED_TO_CHANGE_PIPELINE_STATE);
+      }
+
+      pipeline.setLifeCycleState(newState);
+    } catch (LeaseException e) {
+      throw new IOException("Lease Exception.", e);
+    }
+  }
+
+  public void shutdown() {
+    if (pipelineLeaseManager != null) {
+      pipelineLeaseManager.shutdown();
+    }
+  }
 }
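
The transition table built in initializeStateMachine() is the only source of legal pipeline
moves; getNextState() rejects everything else. A small self-contained illustration (the
class is hypothetical, the StateMachine calls are the ones used above; the last call throws
because CLOSE straight from OPEN is never registered, which is exactly what the new
assertion in TestNode2PipelineMap exercises):

    import java.util.EnumSet;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
    import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
    import org.apache.hadoop.ozone.common.statemachine.StateMachine;

    final class PipelineStateMachineSketch {
      static void demo() throws InvalidStateTransitionException {
        StateMachine<LifeCycleState, LifeCycleEvent> sm = new StateMachine<>(
            LifeCycleState.ALLOCATED,
            EnumSet.of(LifeCycleState.OPEN, LifeCycleState.CLOSED));
        sm.addTransition(LifeCycleState.ALLOCATED, LifeCycleState.CREATING,
            LifeCycleEvent.CREATE);
        sm.addTransition(LifeCycleState.CREATING, LifeCycleState.OPEN,
            LifeCycleEvent.CREATED);

        // Legal: ALLOCATED --CREATE--> CREATING.
        LifeCycleState next =
            sm.getNextState(LifeCycleState.ALLOCATED, LifeCycleEvent.CREATE);
        assert next == LifeCycleState.CREATING;

        // Illegal: OPEN --CLOSE--> was never registered, so this throws
        // InvalidStateTransitionException (which updatePipelineState() wraps
        // into an SCMException with FAILED_TO_CHANGE_PIPELINE_STATE).
        sm.getNextState(LifeCycleState.OPEN, LifeCycleEvent.CLOSE);
      }
    }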

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be25e34/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
index a8f8b20..c726ef6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hdds.scm.pipelines.Node2PipelineMap;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
@@ -72,7 +71,7 @@ public class RatisManagerImpl extends PipelineManager {
    * Allocates a new ratis Pipeline from the free nodes.
    *
    * @param factor - One or Three
-   * @return PipelineChannel.
+   * @return Pipeline.
    */
   public Pipeline allocatePipeline(ReplicationFactor factor) {
     List<DatanodeDetails> newNodesList = new LinkedList<>();
@@ -89,35 +88,23 @@ public class RatisManagerImpl extends PipelineManager {
           // further allocations
           ratisMembers.addAll(newNodesList);
           LOG.info("Allocating a new ratis pipeline of size: {}", count);
-          // Start all channel names with "Ratis", easy to grep the logs.
+          // Start all pipeline names with "Ratis", easy to grep the logs.
           String pipelineName = PREFIX +
               UUID.randomUUID().toString().substring(PREFIX.length());
-          Pipeline pipeline=
-              PipelineSelector.newPipelineFromNodes(newNodesList,
-              LifeCycleState.OPEN, ReplicationType.RATIS, factor, pipelineName);
-          try (XceiverClientRatis client =
-              XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) {
-            client.createPipeline(pipeline.getPipelineName(), newNodesList);
-          } catch (IOException e) {
-            return null;
-          }
-          return pipeline;
+          return PipelineSelector.newPipelineFromNodes(newNodesList,
+              ReplicationType.RATIS, factor, pipelineName);
         }
       }
     }
     return null;
   }
 
-  /**
-   * Creates a pipeline from a specified set of Nodes.
-   *
-   * @param pipelineID - Name of the pipeline
-   * @param datanodes - The list of datanodes that make this pipeline.
-   */
-  @Override
-  public void createPipeline(String pipelineID,
-                             List<DatanodeDetails> datanodes) {
-
+  public void initializePipeline(Pipeline pipeline) throws IOException {
+    //TODO:move the initialization from SCM to client
+    try (XceiverClientRatis client =
+        XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) {
+      client.createPipeline(pipeline.getPipelineName(), pipeline.getMachines());
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be25e34/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
index cf691bf..bb4951f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.scm.pipelines.Node2PipelineMap;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
@@ -86,29 +85,19 @@ public class StandaloneManagerImpl extends PipelineManager {
           // once a datanode has been added to a pipeline, exclude it from
           // further allocations
           standAloneMembers.addAll(newNodesList);
-          LOG.info("Allocating a new standalone pipeline channel of size: {}",
-              count);
-          String channelName =
+          LOG.info("Allocating a new standalone pipeline of size: {}", count);
+          String pipelineName =
               "SA-" + UUID.randomUUID().toString().substring(3);
           return PipelineSelector.newPipelineFromNodes(newNodesList,
-              LifeCycleState.OPEN, ReplicationType.STAND_ALONE,
-              ReplicationFactor.ONE, channelName);
+              ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineName);
         }
       }
     }
     return null;
   }
 
-  /**
-   * Creates a pipeline from a specified set of Nodes.
-   *
-   * @param pipelineID - Name of the pipeline
-   * @param datanodes - The list of datanodes that make this pipeline.
-   */
-  @Override
-  public void createPipeline(String pipelineID,
-                             List<DatanodeDetails> datanodes) {
-    //return newPipelineFromNodes(datanodes, pipelineID);
+  public void initializePipeline(Pipeline pipeline) {
+    // Nothing to be done for standalone pipeline
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be25e34/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
index bc3505f..ffac6d5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.hdds.scm.container.common.helpers
     .ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.junit.AfterClass;
@@ -51,6 +53,7 @@ public class TestNode2PipelineMap {
   private static ContainerWithPipeline ratisContainer;
   private static ContainerStateMap stateMap;
   private static ContainerMapping mapping;
+  private static PipelineSelector pipelineSelector;
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -66,6 +69,7 @@ public class TestNode2PipelineMap {
     mapping = (ContainerMapping)scm.getScmContainerManager();
     stateMap = mapping.getStateManager().getContainerStateMap();
     ratisContainer = mapping.allocateContainer(RATIS, THREE, "testOwner");
+    pipelineSelector = mapping.getPipelineSelector();
   }
 
   /**
@@ -113,5 +117,15 @@ public class TestNode2PipelineMap {
     NavigableSet<ContainerID> set2 = stateMap.getOpenContainerIDsByPipeline(
         ratisContainer.getPipeline().getPipelineName());
     Assert.assertEquals(0, set2.size());
+
+    try {
+      pipelineSelector.updatePipelineState(ratisContainer.getPipeline(),
+          HddsProtos.LifeCycleEvent.CLOSE);
+      Assert.fail("closing of pipeline without finalize should fail");
+    } catch (Exception e) {
+      Assert.assertTrue(e instanceof SCMException);
+      Assert.assertEquals(((SCMException)e).getResult(),
+          SCMException.ResultCodes.FAILED_TO_CHANGE_PIPELINE_STATE);
+    }
   }
 }
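
The new assertion only covers the invalid shortcut. In the context of this test, the legal
teardown sequence would look roughly like the snippet below (hypothetical; the calls declare
IOException, and in this patch the FINALIZE and CLOSE handlers in updatePipelineState() are
still TODOs, so only the state bookkeeping happens):

    // OPEN -> CLOSING -> CLOSED is the only shutdown path the state machine allows.
    pipelineSelector.updatePipelineState(ratisContainer.getPipeline(),
        HddsProtos.LifeCycleEvent.FINALIZE);   // OPEN    -> CLOSING
    pipelineSelector.updatePipelineState(ratisContainer.getPipeline(),
        HddsProtos.LifeCycleEvent.CLOSE);      // CLOSING -> CLOSED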


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[40/50] hadoop git commit: YARN-8548. AllocationRespose proto setNMToken initBuilder not done. Contributed by Bilwa S T.

Posted by in...@apache.org.
YARN-8548. AllocationRespose proto setNMToken initBuilder not done. Contributed by Bilwa S T.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff7c2eda
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff7c2eda
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff7c2eda

Branch: refs/heads/HADOOP-15461
Commit: ff7c2eda34c2c40ad71b50df6462a661bd213fbd
Parents: 8461278
Author: bibinchundatt <bi...@apache.org>
Authored: Tue Jul 24 16:17:20 2018 +0530
Committer: bibinchundatt <bi...@apache.org>
Committed: Tue Jul 24 16:17:20 2018 +0530

----------------------------------------------------------------------
 .../impl/pb/AllocateResponsePBImpl.java         |  1 +
 .../resourcemanager/recovery/TestProtos.java    | 20 ++++++++++++++++++++
 2 files changed, 21 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff7c2eda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
index 3ab5563..8df56b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
@@ -347,6 +347,7 @@ public class AllocateResponsePBImpl extends AllocateResponse {
 
   @Override
   public synchronized void setNMTokens(List<NMToken> nmTokens) {
+    maybeInitBuilder();
     if (nmTokens == null || nmTokens.isEmpty()) {
       if (this.nmTokens != null) {
         this.nmTokens.clear();
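
The one-line fix matters because an AllocateResponsePBImpl can be constructed directly from
an immutable proto (as the new test below does), in which case no builder exists yet and the
first setter would fail. The usual YARN *PBImpl idiom is roughly the following; this is a
sketch of the pattern, not the literal body in this file:

    // Lazily (re)create the builder from the current proto before any mutation.
    private synchronized void maybeInitBuilder() {
      if (viaProto || builder == null) {
        builder = AllocateResponseProto.newBuilder(proto);
      }
      viaProto = false;
    }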

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff7c2eda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java
index cc96412..d42b411 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java
@@ -18,7 +18,15 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.recovery;
 
+import static org.junit.Assert.*;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl;
+import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -33,4 +41,16 @@ public class TestProtos {
     String protoString = proto.toString();
     Assert.assertNotNull(protoString);
   }
+
+  @Test
+  public void testProtoAllocateResponse() {
+    AllocateResponseProto proto = AllocateResponseProto.getDefaultInstance();
+    AllocateResponsePBImpl alloc = new AllocateResponsePBImpl(proto);
+    List<NMToken> nmTokens = new ArrayList<NMToken>();
+    try {
+      alloc.setNMTokens(nmTokens);
+    } catch (Exception ex) {
+      fail();
+    }
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[08/50] hadoop git commit: HADOOP-15547. WASB: improve listStatus performance. Contributed by Thomas Marquardt.

Posted by in...@apache.org.
HADOOP-15547. WASB: improve listStatus performance.
Contributed by Thomas Marquardt.

(cherry picked from commit 749fff577ed9afb4ef8a54b8948f74be083cc620)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45d9568a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45d9568a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45d9568a

Branch: refs/heads/HADOOP-15461
Commit: 45d9568aaaf532a6da11bd7c1844ff81bf66bab1
Parents: 5836e0a
Author: Steve Loughran <st...@apache.org>
Authored: Thu Jul 19 12:31:19 2018 -0700
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Jul 19 12:31:19 2018 -0700

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            |  10 +
 hadoop-tools/hadoop-azure/pom.xml               |  12 +
 .../fs/azure/AzureNativeFileSystemStore.java    | 182 ++++-----
 .../apache/hadoop/fs/azure/FileMetadata.java    |  77 ++--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 376 ++++++++-----------
 .../hadoop/fs/azure/NativeFileSystemStore.java  |  15 +-
 .../apache/hadoop/fs/azure/PartialListing.java  |  61 ---
 .../hadoop/fs/azure/ITestListPerformance.java   | 196 ++++++++++
 8 files changed, 514 insertions(+), 415 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d9568a/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml b/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml
index cde1734..38de35e 100644
--- a/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml
+++ b/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml
@@ -47,4 +47,14 @@
        <Bug pattern="WMI_WRONG_MAP_ITERATOR" />
        <Priority value="2" />
      </Match>
+
+    <!-- FileMetadata is used internally for storing metadata but also
+    subclasses FileStatus to reduce allocations when listing a large number
+    of files.  When it is returned to an external caller as a FileStatus, the
+    extra metadata is no longer useful and we want the equals and hashCode
+    methods of FileStatus to be used. -->
+    <Match>
+        <Class name="org.apache.hadoop.fs.azure.FileMetadata" />
+        <Bug pattern="EQ_DOESNT_OVERRIDE_EQUALS" />
+    </Match>
  </FindBugsFilter>
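
The exclusion documents a deliberate trade-off in this change: FileMetadata now extends
FileStatus so listings can hand the same objects back to callers without extra allocations,
while equality intentionally stays FileStatus-based. In sketch form (a hypothetical class,
not the real FileMetadata):

    import org.apache.hadoop.fs.FileStatus;

    /** Extra internal state, but FileStatus equals()/hashCode() are kept. */
    class MetadataSketch extends FileStatus {
      private final String key;   // store-internal key, never exposed to users

      MetadataSketch(String key) {
        this.key = key;
      }

      String getKey() {
        return key;
      }
      // Intentionally no equals()/hashCode() override: two instances compare
      // equal whenever their FileStatus view does, which is what callers of
      // listStatus() expect. FindBugs reports that as EQ_DOESNT_OVERRIDE_EQUALS,
      // hence the exclusion above.
    }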

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d9568a/hadoop-tools/hadoop-azure/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index 44b67a0..52b5b72 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -43,6 +43,8 @@
     <fs.azure.scale.test.huge.partitionsize>unset</fs.azure.scale.test.huge.partitionsize>
     <!-- Timeout in seconds for scale tests.-->
     <fs.azure.scale.test.timeout>7200</fs.azure.scale.test.timeout>
+    <fs.azure.scale.test.list.performance.threads>10</fs.azure.scale.test.list.performance.threads>
+    <fs.azure.scale.test.list.performance.files>1000</fs.azure.scale.test.list.performance.files>
   </properties>
 
   <build>
@@ -298,6 +300,8 @@
                     <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
                     <fs.azure.scale.test.huge.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.huge.partitionsize>
                     <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+                    <fs.azure.scale.test.list.performance.threads>${fs.azure.scale.test.list.performance.threads}</fs.azure.scale.test.list.performance.threads>
+                    <fs.azure.scale.test.list.performance.files>${fs.azure.scale.test.list.performance.files}</fs.azure.scale.test.list.performance.files>
                   </systemPropertyVariables>
                   <includes>
                     <include>**/Test*.java</include>
@@ -326,6 +330,8 @@
                     <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
                     <fs.azure.scale.test.huge.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.huge.partitionsize>
                     <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+                    <fs.azure.scale.test.list.performance.threads>${fs.azure.scale.test.list.performance.threads}</fs.azure.scale.test.list.performance.threads>
+                    <fs.azure.scale.test.list.performance.files>${fs.azure.scale.test.list.performance.files}</fs.azure.scale.test.list.performance.files>
                   </systemPropertyVariables>
                   <includes>
                     <include>**/TestRollingWindowAverage*.java</include>
@@ -367,6 +373,8 @@
                     <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
                     <fs.azure.scale.test.huge.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.huge.partitionsize>
                     <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+                    <fs.azure.scale.test.list.performance.threads>${fs.azure.scale.test.list.performance.threads}</fs.azure.scale.test.list.performance.threads>
+                    <fs.azure.scale.test.list.performance.files>${fs.azure.scale.test.list.performance.files}</fs.azure.scale.test.list.performance.files>
                   </systemPropertyVariables>
                   <!-- Some tests cannot run in parallel.  Tests that cover -->
                   <!-- access to the root directory must run in isolation -->
@@ -412,6 +420,8 @@
                     <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
                     <fs.azure.scale.test.huge.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.huge.partitionsize>
                     <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+                    <fs.azure.scale.test.list.performance.threads>${fs.azure.scale.test.list.performance.threads}</fs.azure.scale.test.list.performance.threads>
+                    <fs.azure.scale.test.list.performance.files>${fs.azure.scale.test.list.performance.files}</fs.azure.scale.test.list.performance.files>
                   </systemPropertyVariables>
                   <includes>
                     <include>**/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java</include>
@@ -454,6 +464,8 @@
                     <fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
                     <fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
                     <fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
+                    <fs.azure.scale.test.list.performance.threads>${fs.azure.scale.test.list.performance.threads}</fs.azure.scale.test.list.performance.threads>
+                    <fs.azure.scale.test.list.performance.files>${fs.azure.scale.test.list.performance.files}</fs.azure.scale.test.list.performance.files>
                   </systemPropertyVariables>
                   <forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
                   <trimStackTrace>false</trimStackTrace>
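
The two new properties are passed through as system properties to the test JVMs for the new
ITestListPerformance scale test added by this commit. A hypothetical way for test code to
read them, with the pom defaults above as fallbacks:

    // Illustrative only: thread count and file count for the listing scale test.
    int threads = Integer.getInteger(
        "fs.azure.scale.test.list.performance.threads", 10);
    int files = Integer.getInteger(
        "fs.azure.scale.test.list.performance.files", 1000);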

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d9568a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 197ab22..d2f9ca6 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -30,7 +30,6 @@ import java.net.URISyntaxException;
 import java.net.URLDecoder;
 import java.net.URLEncoder;
 import java.security.InvalidKeyException;
-import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.Date;
 import java.util.EnumSet;
@@ -128,6 +127,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   // computed as min(2*cpu,8)
   private static final String KEY_CONCURRENT_CONNECTION_VALUE_OUT = "fs.azure.concurrentRequestCount.out";
 
+  private static final String HADOOP_BLOCK_SIZE_PROPERTY_NAME = "fs.azure.block.size";
   private static final String KEY_STREAM_MIN_READ_SIZE = "fs.azure.read.request.size";
   private static final String KEY_STORAGE_CONNECTION_TIMEOUT = "fs.azure.storage.timeout";
   private static final String KEY_WRITE_BLOCK_SIZE = "fs.azure.write.request.size";
@@ -252,6 +252,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   // Default block sizes
   public static final int DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * 1024 * 1024;
   public static final int DEFAULT_UPLOAD_BLOCK_SIZE = 4 * 1024 * 1024;
+  public static final long DEFAULT_HADOOP_BLOCK_SIZE = 512 * 1024 * 1024L;
 
   private static final int DEFAULT_INPUT_STREAM_VERSION = 2;
 
@@ -313,6 +314,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
   private boolean tolerateOobAppends = DEFAULT_READ_TOLERATE_CONCURRENT_APPEND;
 
+  private long hadoopBlockSize = DEFAULT_HADOOP_BLOCK_SIZE;
   private int downloadBlockSizeBytes = DEFAULT_DOWNLOAD_BLOCK_SIZE;
   private int uploadBlockSizeBytes = DEFAULT_UPLOAD_BLOCK_SIZE;
   private int inputStreamVersion = DEFAULT_INPUT_STREAM_VERSION;
@@ -740,6 +742,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
         KEY_STREAM_MIN_READ_SIZE, DEFAULT_DOWNLOAD_BLOCK_SIZE);
     this.uploadBlockSizeBytes = sessionConfiguration.getInt(
         KEY_WRITE_BLOCK_SIZE, DEFAULT_UPLOAD_BLOCK_SIZE);
+    this.hadoopBlockSize = sessionConfiguration.getLong(
+        HADOOP_BLOCK_SIZE_PROPERTY_NAME, DEFAULT_HADOOP_BLOCK_SIZE);
 
     this.inputStreamVersion = sessionConfiguration.getInt(
         KEY_INPUT_STREAM_VERSION, DEFAULT_INPUT_STREAM_VERSION);
@@ -1234,7 +1238,14 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
     return false;
   }
 
-
+  /**
+   * Returns the file block size.  This is a fake value used for integration
+   * of the Azure store with Hadoop.
+   */
+  @Override
+  public long getHadoopBlockSize() {
+    return hadoopBlockSize;
+  }
 
   /**
    * This should be called from any method that does any modifications to the
@@ -2066,7 +2077,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
         // The key refers to root directory of container.
         // Set the modification time for root to zero.
         return new FileMetadata(key, 0, defaultPermissionNoBlobMetadata(),
-            BlobMaterialization.Implicit);
+            BlobMaterialization.Implicit, hadoopBlockSize);
       }
 
       CloudBlobWrapper blob = getBlobReference(key);
@@ -2086,7 +2097,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
           if (retrieveFolderAttribute(blob)) {
             LOG.debug("{} is a folder blob.", key);
             return new FileMetadata(key, properties.getLastModified().getTime(),
-                getPermissionStatus(blob), BlobMaterialization.Explicit);
+                getPermissionStatus(blob), BlobMaterialization.Explicit, hadoopBlockSize);
           } else {
 
             LOG.debug("{} is a normal blob.", key);
@@ -2095,7 +2106,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
                 key, // Always return denormalized key with metadata.
                 getDataLength(blob, properties),
                 properties.getLastModified().getTime(),
-                getPermissionStatus(blob));
+                getPermissionStatus(blob), hadoopBlockSize);
           }
         } catch(StorageException e){
           if (!NativeAzureFileSystemHelper.isFileNotFoundException(e)) {
@@ -2129,7 +2140,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
           BlobProperties properties = blob.getProperties();
 
           return new FileMetadata(key, properties.getLastModified().getTime(),
-              getPermissionStatus(blob), BlobMaterialization.Implicit);
+              getPermissionStatus(blob), BlobMaterialization.Implicit, hadoopBlockSize);
         }
       }
 
@@ -2178,46 +2189,13 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   }
 
   @Override
-  public PartialListing list(String prefix, final int maxListingCount,
+  public FileMetadata[] list(String prefix, final int maxListingCount,
       final int maxListingDepth) throws IOException {
-    return list(prefix, maxListingCount, maxListingDepth, null);
-  }
-
-  @Override
-  public PartialListing list(String prefix, final int maxListingCount,
-      final int maxListingDepth, String priorLastKey) throws IOException {
-    return list(prefix, PATH_DELIMITER, maxListingCount, maxListingDepth,
-        priorLastKey);
+    return listInternal(prefix, maxListingCount, maxListingDepth);
   }
 
-  @Override
-  public PartialListing listAll(String prefix, final int maxListingCount,
-      final int maxListingDepth, String priorLastKey) throws IOException {
-    return list(prefix, null, maxListingCount, maxListingDepth, priorLastKey);
-  }
-
-  /**
-   * Searches the given list of {@link FileMetadata} objects for a directory
-   * with the given key.
-   *
-   * @param list
-   *          The list to search.
-   * @param key
-   *          The key to search for.
-   * @return The wanted directory, or null if not found.
-   */
-  private static FileMetadata getFileMetadataInList(
-      final Iterable<FileMetadata> list, String key) {
-    for (FileMetadata current : list) {
-      if (current.getKey().equals(key)) {
-        return current;
-      }
-    }
-    return null;
-  }
-
-  private PartialListing list(String prefix, String delimiter,
-      final int maxListingCount, final int maxListingDepth, String priorLastKey)
+  private FileMetadata[] listInternal(String prefix, final int maxListingCount,
+      final int maxListingDepth)
       throws IOException {
     try {
       checkContainer(ContainerAccessType.PureRead);
@@ -2241,7 +2219,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
         objects = listRootBlobs(prefix, true, enableFlatListing);
       }
 
-      ArrayList<FileMetadata> fileMetadata = new ArrayList<FileMetadata>();
+      HashMap<String, FileMetadata> fileMetadata = new HashMap<>(256);
+
       for (ListBlobItem blobItem : objects) {
         // Check that the maximum listing count is not exhausted.
         //
@@ -2261,25 +2240,37 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
           FileMetadata metadata;
           if (retrieveFolderAttribute(blob)) {
-            metadata = new FileMetadata(blobKey,
-                properties.getLastModified().getTime(),
-                getPermissionStatus(blob),
-                BlobMaterialization.Explicit);
+            metadata = new FileMetadata(blobKey,
+                properties.getLastModified().getTime(),
+                getPermissionStatus(blob),
+                BlobMaterialization.Explicit,
+                hadoopBlockSize);
           } else {
-            metadata = new FileMetadata(
-                blobKey,
-                getDataLength(blob, properties),
-                properties.getLastModified().getTime(),
-                getPermissionStatus(blob));
+            metadata = new FileMetadata(
+                blobKey,
+                getDataLength(blob, properties),
+                properties.getLastModified().getTime(),
+                getPermissionStatus(blob),
+                hadoopBlockSize);
           }
+          // Add the metadata but remove duplicates.  Note that the azure
+          // storage java SDK returns two types of entries: CloudBlobWrapper
+          // and CloudDirectoryWrapper.  In the case where WASB generated the
+          // data, there will be an empty blob for each "directory", and we will
+          // receive a CloudBlobWrapper.  If there are also files within this
+          // "directory", we will also receive a CloudDirectoryWrapper.  To
+          // complicate matters, the data may not be generated by WASB, in
+          // which case we may not have an empty blob for each "directory".
+          // So, sometimes we receive both a CloudBlobWrapper and a
+          // CloudDirectoryWrapper for each directory, and sometimes we receive
+          // one or the other but not both.  We remove duplicates, but
+          // prefer CloudBlobWrapper over CloudDirectoryWrapper.
+          // Furthermore, it is very unfortunate that the list results are not
+          // ordered, and it is a partial list which uses continuation.  So
+          // the HashMap is the best structure to remove the duplicates, despite
+          // its potentially large size.
+          fileMetadata.put(blobKey, metadata);
 
-          // Add the metadata to the list, but remove any existing duplicate
-          // entries first that we may have added by finding nested files.
-          FileMetadata existing = getFileMetadataInList(fileMetadata, blobKey);
-          if (existing != null) {
-            fileMetadata.remove(existing);
-          }
-          fileMetadata.add(metadata);
         } else if (blobItem instanceof CloudBlobDirectoryWrapper) {
           CloudBlobDirectoryWrapper directory = (CloudBlobDirectoryWrapper) blobItem;
           // Determine format of directory name depending on whether an absolute
@@ -2298,12 +2289,15 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
           // inherit the permissions of the first non-directory blob.
           // Also, getting a proper value for last-modified is tricky.
           FileMetadata directoryMetadata = new FileMetadata(dirKey, 0,
-              defaultPermissionNoBlobMetadata(), BlobMaterialization.Implicit);
+              defaultPermissionNoBlobMetadata(), BlobMaterialization.Implicit,
+              hadoopBlockSize);
 
           // Add the directory metadata to the list only if it's not already
-          // there.
-          if (getFileMetadataInList(fileMetadata, dirKey) == null) {
-            fileMetadata.add(directoryMetadata);
+          // there.  See earlier note, we prefer CloudBlobWrapper over
+          // CloudDirectoryWrapper because it may have additional metadata (
+          // properties and ACLs).
+          if (!fileMetadata.containsKey(dirKey)) {
+            fileMetadata.put(dirKey, directoryMetadata);
           }
 
           if (!enableFlatListing) {
@@ -2314,13 +2308,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
           }
         }
       }
-      // Note: Original code indicated that this may be a hack.
-      priorLastKey = null;
-      PartialListing listing = new PartialListing(priorLastKey,
-          fileMetadata.toArray(new FileMetadata[] {}),
-          0 == fileMetadata.size() ? new String[] {}
-      : new String[] { prefix });
-      return listing;
+      return fileMetadata.values().toArray(new FileMetadata[fileMetadata.size()]);
     } catch (Exception e) {
       // Re-throw as an Azure storage exception.
       //
@@ -2334,13 +2322,13 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * the sorted order of the blob names.
    *
    * @param aCloudBlobDirectory Azure blob directory
-   * @param aFileMetadataList a list of file metadata objects for each
+   * @param metadataHashMap a map of file metadata objects for each
    *                          non-directory blob.
    * @param maxListingCount maximum length of the built up list.
    */
   private void buildUpList(CloudBlobDirectoryWrapper aCloudBlobDirectory,
-      ArrayList<FileMetadata> aFileMetadataList, final int maxListingCount,
-      final int maxListingDepth) throws Exception {
+                           HashMap<String, FileMetadata> metadataHashMap, final int maxListingCount,
+                           final int maxListingDepth) throws Exception {
 
     // Push the blob directory onto the stack.
     //
@@ -2371,12 +2359,12 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
     // (2) maxListingCount > 0 implies that the number of items in the
     // metadata list is less than the max listing count.
     while (null != blobItemIterator
-        && (maxListingCount <= 0 || aFileMetadataList.size() < maxListingCount)) {
+        && (maxListingCount <= 0 || metadataHashMap.size() < maxListingCount)) {
       while (blobItemIterator.hasNext()) {
         // Check if the count of items on the list exhausts the maximum
         // listing count.
         //
-        if (0 < maxListingCount && aFileMetadataList.size() >= maxListingCount) {
+        if (0 < maxListingCount && metadataHashMap.size() >= maxListingCount) {
           break;
         }
 
@@ -2399,22 +2387,34 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
             metadata = new FileMetadata(blobKey,
                 properties.getLastModified().getTime(),
                 getPermissionStatus(blob),
-                BlobMaterialization.Explicit);
+                BlobMaterialization.Explicit,
+                hadoopBlockSize);
           } else {
             metadata = new FileMetadata(
                 blobKey,
                 getDataLength(blob, properties),
                 properties.getLastModified().getTime(),
-                getPermissionStatus(blob));
+                getPermissionStatus(blob),
+                hadoopBlockSize);
           }
 
-          // Add the directory metadata to the list only if it's not already
-          // there.
-          FileMetadata existing = getFileMetadataInList(aFileMetadataList, blobKey);
-          if (existing != null) {
-            aFileMetadataList.remove(existing);
-          }
-          aFileMetadataList.add(metadata);
+          // Add the metadata but remove duplicates.  Note that the azure
+          // storage java SDK returns two types of entries: CloudBlobWrapper
+          // and CloudDirectoryWrapper.  In the case where WASB generated the
+          // data, there will be an empty blob for each "directory", and we will
+          // receive a CloudBlobWrapper.  If there are also files within this
+          // "directory", we will also receive a CloudDirectoryWrapper.  To
+          // complicate matters, the data may not be generated by WASB, in
+          // which case we may not have an empty blob for each "directory".
+          // So, sometimes we receive both a CloudBlobWrapper and a
+          // CloudDirectoryWrapper for each directory, and sometimes we receive
+          // one or the other but not both.  We remove duplicates, but
+          // prefer CloudBlobWrapper over CloudDirectoryWrapper.
+          // Furthermore, it is very unfortunate that the list results are not
+          // ordered, and it is a partial list which uses continuation.  So
+          // the HashMap is the best structure to remove the duplicates, despite
+          // its potentially large size.
+          metadataHashMap.put(blobKey, metadata);
         } else if (blobItem instanceof CloudBlobDirectoryWrapper) {
           CloudBlobDirectoryWrapper directory = (CloudBlobDirectoryWrapper) blobItem;
 
@@ -2439,7 +2439,12 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
             // absolute path is being used or not.
             String dirKey = normalizeKey(directory);
 
-            if (getFileMetadataInList(aFileMetadataList, dirKey) == null) {
+            // Add the directory metadata to the list only if it's not already
+            // there.  See earlier note, we prefer CloudBlobWrapper over
+            // CloudDirectoryWrapper because it may have additional metadata (
+            // properties and ACLs).
+            if (!metadataHashMap.containsKey(dirKey)) {
+
               // Reached the targeted listing depth. Return metadata for the
               // directory using default permissions.
               //
@@ -2450,10 +2455,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
               FileMetadata directoryMetadata = new FileMetadata(dirKey,
                   0,
                   defaultPermissionNoBlobMetadata(),
-                  BlobMaterialization.Implicit);
+                  BlobMaterialization.Implicit,
+                  hadoopBlockSize);
 
               // Add the directory metadata to the list.
-              aFileMetadataList.add(directoryMetadata);
+              metadataHashMap.put(dirKey, directoryMetadata);
             }
           }
         }

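The listing change above keys a HashMap on the blob key so that duplicate entries for the same "directory" collapse into one, always preferring the CloudBlobWrapper entry (which can carry permissions and metadata) over the CloudBlobDirectoryWrapper placeholder. As a minimal, self-contained sketch of that idea (the Entry type and method names below are hypothetical, not part of the patch):

    import java.util.HashMap;
    import java.util.Map;

    // Sketch of the duplicate-removal strategy: entries are keyed by blob key,
    // a blob-backed entry always wins, and a directory placeholder is only
    // kept when nothing better has been seen for that key.
    class ListingDedupSketch {
      static final class Entry {
        final String key;
        final boolean fromBlob;   // true when backed by a real (possibly empty) blob

        Entry(String key, boolean fromBlob) {
          this.key = key;
          this.fromBlob = fromBlob;
        }
      }

      private final Map<String, Entry> byKey = new HashMap<>(256);

      void addBlobEntry(Entry e) {
        // A blob-backed entry overwrites any directory placeholder seen earlier.
        byKey.put(e.key, e);
      }

      void addDirectoryEntry(Entry e) {
        // A directory placeholder never replaces a blob-backed entry.
        byKey.putIfAbsent(e.key, e);
      }

      Entry[] snapshot() {
        return byKey.values().toArray(new Entry[0]);
      }
    }

Because the Azure SDK may return the blob and directory wrappers for the same key in any order, the map lookup keeps deduplication O(1) per item, which is what replaces the earlier linear scan over an ArrayList in getFileMetadataInList.
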
http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d9568a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/FileMetadata.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/FileMetadata.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/FileMetadata.java
index 5085a0f..cbf3ab9 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/FileMetadata.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/FileMetadata.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.fs.azure;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 
 /**
@@ -27,12 +29,9 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
  * </p>
  */
 @InterfaceAudience.Private
-class FileMetadata {
-  private final String key;
-  private final long length;
-  private final long lastModified;
-  private final boolean isDir;
-  private final PermissionStatus permissionStatus;
+class FileMetadata extends FileStatus {
+  // this is not final so that it can be cleared to save memory when not needed.
+  private String key;
   private final BlobMaterialization blobMaterialization;
 
   /**
@@ -46,16 +45,19 @@ class FileMetadata {
    *          The last modified date (milliseconds since January 1, 1970 UTC.)
    * @param permissionStatus
    *          The permission for the file.
+   * @param blockSize
+   *          The Hadoop file block size.
    */
   public FileMetadata(String key, long length, long lastModified,
-      PermissionStatus permissionStatus) {
+      PermissionStatus permissionStatus, final long blockSize) {
+    super(length, false, 1, blockSize, lastModified, 0,
+        permissionStatus.getPermission(),
+        permissionStatus.getUserName(),
+        permissionStatus.getGroupName(),
+        null);
     this.key = key;
-    this.length = length;
-    this.lastModified = lastModified;
-    this.isDir = false;
-    this.permissionStatus = permissionStatus;
-    this.blobMaterialization = BlobMaterialization.Explicit; // File are never
-                                                             // implicit.
+    // Files are never implicit.
+    this.blobMaterialization = BlobMaterialization.Explicit;
   }
 
   /**
@@ -70,37 +72,42 @@ class FileMetadata {
    * @param blobMaterialization
    *          Whether this is an implicit (no real blob backing it) or explicit
    *          directory.
+   * @param blockSize
+   *          The Hadoop file block size.
    */
   public FileMetadata(String key, long lastModified,
-      PermissionStatus permissionStatus, BlobMaterialization blobMaterialization) {
+      PermissionStatus permissionStatus, BlobMaterialization blobMaterialization,
+      final long blockSize) {
+    super(0, true, 1, blockSize, lastModified, 0,
+        permissionStatus.getPermission(),
+        permissionStatus.getUserName(),
+        permissionStatus.getGroupName(),
+        null);
     this.key = key;
-    this.isDir = true;
-    this.length = 0;
-    this.lastModified = lastModified;
-    this.permissionStatus = permissionStatus;
     this.blobMaterialization = blobMaterialization;
   }
 
-  public boolean isDir() {
-    return isDir;
+  @Override
+  public Path getPath() {
+    Path p = super.getPath();
+    if (p == null) {
+      // Don't store this yet to reduce memory usage, as it will
+      // stay in the Eden Space and later we will update it
+      // with the full canonicalized path.
+      p = NativeAzureFileSystem.keyToPath(key);
+    }
+    return p;
   }
 
+  /**
+   * Returns the Azure storage key for the file.  Used internally by the framework.
+   *
+   * @return The key for the file.
+   */
   public String getKey() {
     return key;
   }
 
-  public long getLength() {
-    return length;
-  }
-
-  public long getLastModified() {
-    return lastModified;
-  }
-
-  public PermissionStatus getPermissionStatus() {
-    return permissionStatus;
-  }
-
   /**
    * Indicates whether this is an implicit directory (no real blob backing it)
    * or an explicit one.
@@ -112,9 +119,7 @@ class FileMetadata {
     return blobMaterialization;
   }
 
-  @Override
-  public String toString() {
-    return "FileMetadata[" + key + ", " + length + ", " + lastModified + ", "
-        + permissionStatus + "]";
+  void removeKey() {
+    key = null;
   }
 }

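FileMetadata now extends FileStatus, so a listing entry can be handed straight back to FileSystem callers, and getPath() derives the Path from the store key only when first asked; once the filesystem sets the fully qualified path, removeKey() lets the key be dropped to save memory. A rough sketch of that lazy-path pattern on top of the public FileStatus API (illustrative only; the real class uses NativeAzureFileSystem.keyToPath and the permission-carrying constructor shown in the diff):

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;

    // FileStatus subclass that derives its Path lazily from an internal key
    // and allows the key to be released once the qualified path is stored.
    class LazyPathStatus extends FileStatus {
      private String key;   // not final: cleared after the path is materialized

      LazyPathStatus(String key, long length, boolean isDir, long blockSize,
          long modificationTime) {
        super(length, isDir, 1, blockSize, modificationTime, null);
        this.key = key;
      }

      @Override
      public Path getPath() {
        Path p = super.getPath();
        if (p == null) {
          // Only build a Path when asked; callers are expected to follow up
          // with setPath() once they know the fully qualified form.
          p = new Path("/" + key);
        }
        return p;
      }

      void dropKey() {
        key = null;   // reclaim the key's memory once the path is set
      }
    }

The motivation matches the comment in the diff: freshly listed entries are short-lived, so deferring the Path allocation until updateFileStatusPath() qualifies it avoids building throwaway Path objects for entries that are immediately rewritten.
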
http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d9568a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 5202762..f8962d9 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -31,9 +31,7 @@ import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.EnumSet;
-import java.util.Set;
 import java.util.TimeZone;
-import java.util.TreeSet;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.regex.Matcher;
@@ -129,20 +127,12 @@ public class NativeAzureFileSystem extends FileSystem {
       this.dstKey = dstKey;
       this.folderLease = lease;
       this.fs = fs;
-      ArrayList<FileMetadata> fileMetadataList = new ArrayList<FileMetadata>();
 
       // List all the files in the folder.
       long start = Time.monotonicNow();
-      String priorLastKey = null;
-      do {
-        PartialListing listing = fs.getStoreInterface().listAll(srcKey, AZURE_LIST_ALL,
-          AZURE_UNBOUNDED_DEPTH, priorLastKey);
-        for(FileMetadata file : listing.getFiles()) {
-          fileMetadataList.add(file);
-        }
-        priorLastKey = listing.getPriorLastKey();
-      } while (priorLastKey != null);
-      fileMetadata = fileMetadataList.toArray(new FileMetadata[fileMetadataList.size()]);
+      fileMetadata = fs.getStoreInterface().list(srcKey, AZURE_LIST_ALL,
+          AZURE_UNBOUNDED_DEPTH);
+
       long end = Time.monotonicNow();
       LOG.debug("Time taken to list {} blobs for rename operation is: {} ms", fileMetadata.length, (end - start));
 
@@ -669,7 +659,6 @@ public class NativeAzureFileSystem extends FileSystem {
 
   public static final Logger LOG = LoggerFactory.getLogger(NativeAzureFileSystem.class);
 
-  static final String AZURE_BLOCK_SIZE_PROPERTY_NAME = "fs.azure.block.size";
   /**
    * The time span in seconds before which we consider a temp blob to be
    * dangling (not being actively uploaded to) and up for reclamation.
@@ -685,8 +674,6 @@ public class NativeAzureFileSystem extends FileSystem {
   private static final int AZURE_LIST_ALL = -1;
   private static final int AZURE_UNBOUNDED_DEPTH = -1;
 
-  private static final long MAX_AZURE_BLOCK_SIZE = 512 * 1024 * 1024L;
-
   /**
    * The configuration property that determines which group owns files created
    * in WASB.
@@ -1196,7 +1183,6 @@ public class NativeAzureFileSystem extends FileSystem {
   private NativeFileSystemStore store;
   private AzureNativeFileSystemStore actualStore;
   private Path workingDir;
-  private long blockSize = MAX_AZURE_BLOCK_SIZE;
   private AzureFileSystemInstrumentation instrumentation;
   private String metricsSourceName;
   private boolean isClosed = false;
@@ -1361,13 +1347,10 @@ public class NativeAzureFileSystem extends FileSystem {
     this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
     this.workingDir = new Path("/user", UserGroupInformation.getCurrentUser()
         .getShortUserName()).makeQualified(getUri(), getWorkingDirectory());
-    this.blockSize = conf.getLong(AZURE_BLOCK_SIZE_PROPERTY_NAME,
-        MAX_AZURE_BLOCK_SIZE);
 
     this.appendSupportEnabled = conf.getBoolean(APPEND_SUPPORT_ENABLE_PROPERTY_NAME, false);
     LOG.debug("NativeAzureFileSystem. Initializing.");
-    LOG.debug("  blockSize  = {}",
-        conf.getLong(AZURE_BLOCK_SIZE_PROPERTY_NAME, MAX_AZURE_BLOCK_SIZE));
+    LOG.debug("  blockSize  = {}", store.getHadoopBlockSize());
 
     // Initialize thread counts from user configuration
     deleteThreadCount = conf.getInt(AZURE_DELETE_THREADS, DEFAULT_AZURE_DELETE_THREADS);
@@ -1491,7 +1474,7 @@ public class NativeAzureFileSystem extends FileSystem {
     }
   }
 
-  private static Path keyToPath(String key) {
+  static Path keyToPath(String key) {
     if (key.equals("/")) {
       return new Path("/"); // container
     }
@@ -1599,7 +1582,7 @@ public class NativeAzureFileSystem extends FileSystem {
       throw new FileNotFoundException(f.toString());
     }
 
-    if (meta.isDir()) {
+    if (meta.isDirectory()) {
       throw new FileNotFoundException(f.toString()
           + " is a directory not a file.");
     }
@@ -1815,7 +1798,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
     FileMetadata existingMetadata = store.retrieveMetadata(key);
     if (existingMetadata != null) {
-      if (existingMetadata.isDir()) {
+      if (existingMetadata.isDirectory()) {
         throw new FileAlreadyExistsException("Cannot create file " + f
             + "; already exists as a directory.");
       }
@@ -1833,7 +1816,7 @@ public class NativeAzureFileSystem extends FileSystem {
       // already exists.
       String parentKey = pathToKey(parentFolder);
       FileMetadata parentMetadata = store.retrieveMetadata(parentKey);
-      if (parentMetadata != null && parentMetadata.isDir() &&
+      if (parentMetadata != null && parentMetadata.isDirectory() &&
         parentMetadata.getBlobMaterialization() == BlobMaterialization.Explicit) {
         if (parentFolderLease != null) {
           store.updateFolderLastModifiedTime(parentKey, parentFolderLease);
@@ -1850,7 +1833,7 @@ public class NativeAzureFileSystem extends FileSystem {
           firstExisting = firstExisting.getParent();
           metadata = store.retrieveMetadata(pathToKey(firstExisting));
         }
-        mkdirs(parentFolder, metadata.getPermissionStatus().getPermission(), true);
+        mkdirs(parentFolder, metadata.getPermission(), true);
       }
     }
 
@@ -1988,7 +1971,7 @@ public class NativeAzureFileSystem extends FileSystem {
               + parentPath + " whose metadata cannot be retrieved. Can't resolve");
       }
 
-      if (!parentMetadata.isDir()) {
+      if (!parentMetadata.isDirectory()) {
          // Invalid state: the parent path is actually a file. Throw.
          throw new AzureException("File " + f + " has a parent directory "
              + parentPath + " which is also a file. Can't resolve.");
@@ -1997,7 +1980,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
     // The path exists, determine if it is a folder containing objects,
     // an empty folder, or a simple file and take the appropriate actions.
-    if (!metaFile.isDir()) {
+    if (!metaFile.isDirectory()) {
       // The path specifies a file. We need to check the parent path
       // to make sure it's a proper materialized directory before we
       // delete the file. Otherwise we may get into a situation where
@@ -2114,9 +2097,9 @@ public class NativeAzureFileSystem extends FileSystem {
       AzureFileSystemThreadTask task = new AzureFileSystemThreadTask() {
         @Override
         public boolean execute(FileMetadata file) throws IOException{
-          if (!deleteFile(file.getKey(), file.isDir())) {
+          if (!deleteFile(file.getKey(), file.isDirectory())) {
             LOG.warn("Attempt to delete non-existent {} {}",
-                file.isDir() ? "directory" : "file",
+                file.isDirectory() ? "directory" : "file",
                 file.getKey());
           }
           return true;
@@ -2138,7 +2121,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
       // Delete the current directory if all underlying contents are deleted
       if (isPartialDelete || (store.retrieveMetadata(metaFile.getKey()) != null
-          && !deleteFile(metaFile.getKey(), metaFile.isDir()))) {
+          && !deleteFile(metaFile.getKey(), metaFile.isDirectory()))) {
         LOG.error("Failed delete directory : {}", f);
         return false;
       }
@@ -2191,7 +2174,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
     // The path exists, determine if it is a folder containing objects,
     // an empty folder, or a simple file and take the appropriate actions.
-    if (!metaFile.isDir()) {
+    if (!metaFile.isDirectory()) {
       // The path specifies a file. We need to check the parent path
       // to make sure it's a proper materialized directory before we
       // delete the file. Otherwise we may get into a situation where
@@ -2234,7 +2217,7 @@ public class NativeAzureFileSystem extends FileSystem {
               + parentPath + " whose metadata cannot be retrieved. Can't resolve");
         }
 
-        if (!parentMetadata.isDir()) {
+        if (!parentMetadata.isDirectory()) {
           // Invalid state: the parent path is actually a file. Throw.
           throw new AzureException("File " + f + " has a parent directory "
               + parentPath + " which is also a file. Can't resolve.");
@@ -2319,38 +2302,27 @@ public class NativeAzureFileSystem extends FileSystem {
         }
       }
 
-      // List all the blobs in the current folder.
-      String priorLastKey = null;
-
       // Start time for list operation
       long start = Time.monotonicNow();
-      ArrayList<FileMetadata> fileMetadataList = new ArrayList<FileMetadata>();
+      final FileMetadata[] contents;
 
       // List all the files in the folder with AZURE_UNBOUNDED_DEPTH depth.
-      do {
-        try {
-          PartialListing listing = store.listAll(key, AZURE_LIST_ALL,
-            AZURE_UNBOUNDED_DEPTH, priorLastKey);
-          for(FileMetadata file : listing.getFiles()) {
-            fileMetadataList.add(file);
-          }
-          priorLastKey = listing.getPriorLastKey();
-        } catch (IOException e) {
-          Throwable innerException = checkForAzureStorageException(e);
-
-          if (innerException instanceof StorageException
-              && isFileNotFoundException((StorageException) innerException)) {
-            return false;
-          }
+      try {
+        contents = store.list(key, AZURE_LIST_ALL,
+            AZURE_UNBOUNDED_DEPTH);
+      } catch (IOException e) {
+        Throwable innerException = checkForAzureStorageException(e);
 
-          throw e;
+        if (innerException instanceof StorageException
+            && isFileNotFoundException((StorageException) innerException)) {
+          return false;
         }
-      } while (priorLastKey != null);
 
-      long end = Time.monotonicNow();
-      LOG.debug("Time taken to list {} blobs for delete operation: {} ms", fileMetadataList.size(), (end - start));
+        throw e;
+      }
 
-      final FileMetadata[] contents = fileMetadataList.toArray(new FileMetadata[fileMetadataList.size()]);
+      long end = Time.monotonicNow();
+      LOG.debug("Time taken to list {} blobs for delete operation: {} ms", contents.length, (end - start));
 
       if (contents.length > 0) {
         if (!recursive) {
@@ -2365,9 +2337,9 @@ public class NativeAzureFileSystem extends FileSystem {
       AzureFileSystemThreadTask task = new AzureFileSystemThreadTask() {
         @Override
         public boolean execute(FileMetadata file) throws IOException{
-          if (!deleteFile(file.getKey(), file.isDir())) {
+          if (!deleteFile(file.getKey(), file.isDirectory())) {
             LOG.warn("Attempt to delete non-existent {} {}",
-                file.isDir() ? "directory" : "file",
+                file.isDirectory() ? "directory" : "file",
                 file.getKey());
           }
           return true;
@@ -2384,7 +2356,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
       // Delete the current directory
       if (store.retrieveMetadata(metaFile.getKey()) != null
-          && !deleteFile(metaFile.getKey(), metaFile.isDir())) {
+          && !deleteFile(metaFile.getKey(), metaFile.isDirectory())) {
         LOG.error("Failed delete directory : {}", f);
         return false;
       }
@@ -2456,13 +2428,13 @@ public class NativeAzureFileSystem extends FileSystem {
 
     boolean isPartialDelete = false;
 
-    Path pathToDelete = makeAbsolute(keyToPath(folderToDelete.getKey()));
+    Path pathToDelete = makeAbsolute(folderToDelete.getPath());
     foldersToProcess.push(folderToDelete);
 
     while (!foldersToProcess.empty()) {
 
       FileMetadata currentFolder = foldersToProcess.pop();
-      Path currentPath = makeAbsolute(keyToPath(currentFolder.getKey()));
+      Path currentPath = makeAbsolute(currentFolder.getPath());
       boolean canDeleteChildren = true;
 
       // If authorization is enabled, check for 'write' permission on current folder
@@ -2478,8 +2450,8 @@ public class NativeAzureFileSystem extends FileSystem {
       if (canDeleteChildren) {
 
         // get immediate children list
-        ArrayList<FileMetadata> fileMetadataList = getChildrenMetadata(currentFolder.getKey(),
-            maxListingDepth);
+        FileMetadata[] fileMetadataList = store.list(currentFolder.getKey(),
+            AZURE_LIST_ALL, maxListingDepth);
 
         // Process children of currentFolder and add them to list of contents
         // that can be deleted. We Perform stickybit check on every file and
@@ -2490,12 +2462,12 @@ public class NativeAzureFileSystem extends FileSystem {
             // This file/folder cannot be deleted and neither can the parent paths be deleted.
             // Remove parent paths from list of contents that can be deleted.
             canDeleteChildren = false;
-            Path filePath = makeAbsolute(keyToPath(childItem.getKey()));
+            Path filePath = makeAbsolute(childItem.getPath());
             LOG.error("User does not have permissions to delete {}. "
               + "Parent directory has sticky bit set.", filePath);
           } else {
             // push the child directories to the stack to process their contents
-            if (childItem.isDir()) {
+            if (childItem.isDirectory()) {
               foldersToProcess.push(childItem);
             }
             // Add items to list of contents that can be deleted.
@@ -2540,23 +2512,6 @@ public class NativeAzureFileSystem extends FileSystem {
     return isPartialDelete;
   }
 
-  private ArrayList<FileMetadata> getChildrenMetadata(String key, int maxListingDepth)
-    throws IOException {
-
-    String priorLastKey = null;
-    ArrayList<FileMetadata> fileMetadataList = new ArrayList<FileMetadata>();
-    do {
-       PartialListing listing = store.listAll(key, AZURE_LIST_ALL,
-         maxListingDepth, priorLastKey);
-       for (FileMetadata file : listing.getFiles()) {
-         fileMetadataList.add(file);
-       }
-       priorLastKey = listing.getPriorLastKey();
-    } while (priorLastKey != null);
-
-    return fileMetadataList;
-  }
-
   private boolean isStickyBitCheckViolated(FileMetadata metaData,
     FileMetadata parentMetadata, boolean throwOnException) throws IOException {
       try {
@@ -2602,13 +2557,13 @@ public class NativeAzureFileSystem extends FileSystem {
     }
 
     // stickybit is not set on parent and hence cannot be violated
-    if (!parentMetadata.getPermissionStatus().getPermission().getStickyBit()) {
+    if (!parentMetadata.getPermission().getStickyBit()) {
       return false;
     }
 
     String currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
-    String parentDirectoryOwner = parentMetadata.getPermissionStatus().getUserName();
-    String currentFileOwner = metaData.getPermissionStatus().getUserName();
+    String parentDirectoryOwner = parentMetadata.getOwner();
+    String currentFileOwner = metaData.getOwner();
 
     // Files/Folders with no owner set will not pass stickybit check
     if ((parentDirectoryOwner.equalsIgnoreCase(currentUser))
@@ -2687,7 +2642,15 @@ public class NativeAzureFileSystem extends FileSystem {
     Path absolutePath = makeAbsolute(f);
     String key = pathToKey(absolutePath);
     if (key.length() == 0) { // root always exists
-      return newDirectory(null, absolutePath);
+      return new FileStatus(
+          0,
+          true,
+          1,
+          store.getHadoopBlockSize(),
+          0,
+          0,
+          FsPermission.getDefault(), "", "",
+          absolutePath.makeQualified(getUri(), getWorkingDirectory()));
     }
 
     // The path is either a folder or a file. Retrieve metadata to
@@ -2709,7 +2672,7 @@ public class NativeAzureFileSystem extends FileSystem {
     }
 
     if (meta != null) {
-      if (meta.isDir()) {
+      if (meta.isDirectory()) {
         // The path is a folder with files in it.
         //
 
@@ -2723,14 +2686,14 @@ public class NativeAzureFileSystem extends FileSystem {
         }
 
         // Return reference to the directory object.
-        return newDirectory(meta, absolutePath);
+        return updateFileStatusPath(meta, absolutePath);
       }
 
       // The path is a file.
       LOG.debug("Found the path: {} as a file.", f.toString());
 
       // Return with reference to a file object.
-      return newFile(meta, absolutePath);
+      return updateFileStatusPath(meta, absolutePath);
     }
 
     // File not found. Throw exception no such file or directory.
@@ -2787,7 +2750,7 @@ public class NativeAzureFileSystem extends FileSystem {
     performAuthCheck(absolutePath, WasbAuthorizationOperations.READ, "liststatus", absolutePath);
 
     String key = pathToKey(absolutePath);
-    Set<FileStatus> status = new TreeSet<FileStatus>();
+
     FileMetadata meta = null;
     try {
       meta = store.retrieveMetadata(key);
@@ -2804,101 +2767,93 @@ public class NativeAzureFileSystem extends FileSystem {
       throw ex;
     }
 
-    if (meta != null) {
-      if (!meta.isDir()) {
-
-        LOG.debug("Found path as a file");
-
-        return new FileStatus[] { newFile(meta, absolutePath) };
-      }
-
-      String partialKey = null;
-      PartialListing listing = null;
-
-      try {
-        listing  = store.list(key, AZURE_LIST_ALL, 1, partialKey);
-      } catch (IOException ex) {
-
-        Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(ex);
-
-        if (innerException instanceof StorageException
-            && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
+    if (meta == null) {
+      // There is no metadata found for the path.
+      LOG.debug("Did not find any metadata for path: {}", key);
+      throw new FileNotFoundException(f + " is not found");
+    }
 
-            throw new FileNotFoundException(String.format("%s is not found", key));
-        }
+    if (!meta.isDirectory()) {
+      LOG.debug("Found path as a file");
+      return new FileStatus[] { updateFileStatusPath(meta, absolutePath) };
+    }
 
-        throw ex;
-      }
-      // NOTE: We don't check for Null condition as the Store API should return
-      // an empty list if there are not listing.
+    FileMetadata[] listing;
 
-      // For any -RenamePending.json files in the listing,
-      // push the rename forward.
-      boolean renamed = conditionalRedoFolderRenames(listing);
+    listing = listWithErrorHandling(key, AZURE_LIST_ALL, 1);
 
-      // If any renames were redone, get another listing,
-      // since the current one may have changed due to the redo.
-      if (renamed) {
-       listing = null;
-       try {
-         listing = store.list(key, AZURE_LIST_ALL, 1, partialKey);
-       } catch (IOException ex) {
-         Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(ex);
+    // NOTE: We don't check for Null condition as the Store API should return
+    // an empty list if there is nothing to list.
 
-         if (innerException instanceof StorageException
-             && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
+    // For any -RenamePending.json files in the listing,
+    // push the rename forward.
+    boolean renamed = conditionalRedoFolderRenames(listing);
 
-           throw new FileNotFoundException(String.format("%s is not found", key));
-         }
+    // If any renames were redone, get another listing,
+    // since the current one may have changed due to the redo.
+    if (renamed) {
+      listing = listWithErrorHandling(key, AZURE_LIST_ALL, 1);
+    }
 
-         throw ex;
-       }
-      }
+    // We only need to check for AZURE_TEMP_FOLDER if the key is the root,
+    // and if it is not the root we also know the exact size of the array
+    // of FileStatus.
 
-      // NOTE: We don't check for Null condition as the Store API should return
-      // and empty list if there are not listing.
+    FileMetadata[] result = null;
 
-      for (FileMetadata fileMetadata : listing.getFiles()) {
-        Path subpath = keyToPath(fileMetadata.getKey());
+    if (key.equals("/")) {
+      ArrayList<FileMetadata> status = new ArrayList<>(listing.length);
 
-        // Test whether the metadata represents a file or directory and
-        // add the appropriate metadata object.
-        //
-        // Note: There was a very old bug here where directories were added
-        // to the status set as files flattening out recursive listings
-        // using "-lsr" down the file system hierarchy.
-        if (fileMetadata.isDir()) {
+      for (FileMetadata fileMetadata : listing) {
+        if (fileMetadata.isDirectory()) {
           // Make sure we hide the temp upload folder
           if (fileMetadata.getKey().equals(AZURE_TEMP_FOLDER)) {
             // Don't expose that.
             continue;
           }
-          status.add(newDirectory(fileMetadata, subpath));
+          status.add(updateFileStatusPath(fileMetadata, fileMetadata.getPath()));
         } else {
-          status.add(newFile(fileMetadata, subpath));
+          status.add(updateFileStatusPath(fileMetadata, fileMetadata.getPath()));
         }
       }
+      result = status.toArray(new FileMetadata[0]);
+    } else {
+      for (int i = 0; i < listing.length; i++) {
+        FileMetadata fileMetadata = listing[i];
+        listing[i] = updateFileStatusPath(fileMetadata, fileMetadata.getPath());
+      }
+      result = listing;
+    }
 
-      LOG.debug("Found path as a directory with {}"
-          + " files in it.", status.size());
+    LOG.debug("Found path as a directory with {}"
+        + " files in it.", result.length);
 
-    } else {
-      // There is no metadata found for the path.
-      LOG.debug("Did not find any metadata for path: {}", key);
+    return result;
+  }
 
-      throw new FileNotFoundException(f + " is not found");
+  private FileMetadata[] listWithErrorHandling(String prefix, final int maxListingCount,
+                                              final int maxListingDepth) throws IOException {
+    try {
+      return store.list(prefix, maxListingCount, maxListingDepth);
+    } catch (IOException ex) {
+      Throwable innerException
+          = NativeAzureFileSystemHelper.checkForAzureStorageException(ex);
+      if (innerException instanceof StorageException
+          && NativeAzureFileSystemHelper.isFileNotFoundException(
+          (StorageException) innerException)) {
+        throw new FileNotFoundException(String.format("%s is not found", prefix));
+      }
+      throw ex;
     }
-
-    return status.toArray(new FileStatus[0]);
   }
 
   // Redo any folder renames needed if there are rename pending files in the
   // directory listing. Return true if one or more redo operations were done.
-  private boolean conditionalRedoFolderRenames(PartialListing listing)
+  private boolean conditionalRedoFolderRenames(FileMetadata[] listing)
       throws IllegalArgumentException, IOException {
     boolean renamed = false;
-    for (FileMetadata fileMetadata : listing.getFiles()) {
-      Path subpath = keyToPath(fileMetadata.getKey());
+    for (FileMetadata fileMetadata : listing) {
+      Path subpath = fileMetadata.getPath();
       if (isRenamePendingFile(subpath)) {
         FolderRenamePending pending =
             new FolderRenamePending(subpath, this);
@@ -2914,32 +2869,11 @@ public class NativeAzureFileSystem extends FileSystem {
     return path.toString().endsWith(FolderRenamePending.SUFFIX);
   }
 
-  private FileStatus newFile(FileMetadata meta, Path path) {
-    return new FileStatus (
-        meta.getLength(),
-        false,
-        1,
-        blockSize,
-        meta.getLastModified(),
-        0,
-        meta.getPermissionStatus().getPermission(),
-        meta.getPermissionStatus().getUserName(),
-        meta.getPermissionStatus().getGroupName(),
-        path.makeQualified(getUri(), getWorkingDirectory()));
-  }
-
-  private FileStatus newDirectory(FileMetadata meta, Path path) {
-    return new FileStatus (
-        0,
-        true,
-        1,
-        blockSize,
-        meta == null ? 0 : meta.getLastModified(),
-        0,
-        meta == null ? FsPermission.getDefault() : meta.getPermissionStatus().getPermission(),
-        meta == null ? "" : meta.getPermissionStatus().getUserName(),
-        meta == null ? "" : meta.getPermissionStatus().getGroupName(),
-        path.makeQualified(getUri(), getWorkingDirectory()));
+  private FileMetadata updateFileStatusPath(FileMetadata meta, Path path) {
+    meta.setPath(path.makeQualified(getUri(), getWorkingDirectory()));
+    // reduce memory use by setting the internal-only key to null
+    meta.removeKey();
+    return meta;
   }
 
   private static enum UMaskApplyMode {
@@ -3000,8 +2934,8 @@ public class NativeAzureFileSystem extends FileSystem {
 
       String currentKey = pathToKey(current);
       FileMetadata currentMetadata = store.retrieveMetadata(currentKey);
-      if (currentMetadata != null && currentMetadata.isDir()) {
-        Path ancestor = keyToPath(currentMetadata.getKey());
+      if (currentMetadata != null && currentMetadata.isDirectory()) {
+        Path ancestor = currentMetadata.getPath();
         LOG.debug("Found ancestor {}, for path: {}", ancestor.toString(), f.toString());
         return ancestor;
       }
@@ -3052,7 +2986,7 @@ public class NativeAzureFileSystem extends FileSystem {
         current = parent, parent = current.getParent()) {
       String currentKey = pathToKey(current);
       FileMetadata currentMetadata = store.retrieveMetadata(currentKey);
-      if (currentMetadata != null && !currentMetadata.isDir()) {
+      if (currentMetadata != null && !currentMetadata.isDirectory()) {
         throw new FileAlreadyExistsException("Cannot create directory " + f + " because "
             + current + " is an existing file.");
       } else if (currentMetadata == null) {
@@ -3099,7 +3033,7 @@ public class NativeAzureFileSystem extends FileSystem {
     if (meta == null) {
       throw new FileNotFoundException(f.toString());
     }
-    if (meta.isDir()) {
+    if (meta.isDirectory()) {
       throw new FileNotFoundException(f.toString()
           + " is a directory not a file.");
     }
@@ -3120,7 +3054,7 @@ public class NativeAzureFileSystem extends FileSystem {
     }
 
     return new FSDataInputStream(new BufferedFSInputStream(
-        new NativeAzureFsInputStream(inputStream, key, meta.getLength()), bufferSize));
+        new NativeAzureFsInputStream(inputStream, key, meta.getLen()), bufferSize));
   }
 
   @Override
@@ -3196,7 +3130,7 @@ public class NativeAzureFileSystem extends FileSystem {
       }
     }
 
-    if (dstMetadata != null && dstMetadata.isDir()) {
+    if (dstMetadata != null && dstMetadata.isDirectory()) {
       // It's an existing directory.
       performAuthCheck(absoluteDstPath, WasbAuthorizationOperations.WRITE, "rename",
           absoluteDstPath);
@@ -3232,7 +3166,7 @@ public class NativeAzureFileSystem extends FileSystem {
         LOG.debug("Parent of the destination {}"
             + " doesn't exist, failing the rename.", dst);
         return false;
-      } else if (!parentOfDestMetadata.isDir()) {
+      } else if (!parentOfDestMetadata.isDirectory()) {
         LOG.debug("Parent of the destination {}"
             + " is a file, failing the rename.", dst);
         return false;
@@ -3261,7 +3195,7 @@ public class NativeAzureFileSystem extends FileSystem {
       // Source doesn't exist
       LOG.debug("Source {} doesn't exist, failing the rename.", src);
       return false;
-    } else if (!srcMetadata.isDir()) {
+    } else if (!srcMetadata.isDirectory()) {
       LOG.debug("Source {} found as a file, renaming.", src);
       try {
         // HADOOP-15086 - file rename must ensure that the destination does
@@ -3335,7 +3269,7 @@ public class NativeAzureFileSystem extends FileSystem {
       // single file. In this case, the parent folder no longer exists if the
       // file is renamed; so we can safely ignore the null pointer case.
       if (parentMetadata != null) {
-        if (parentMetadata.isDir()
+        if (parentMetadata.isDirectory()
             && parentMetadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
           store.storeEmptyFolder(parentKey,
               createPermissionStatus(FsPermission.getDefault()));
@@ -3511,7 +3445,7 @@ public class NativeAzureFileSystem extends FileSystem {
           && !isAllowedUser(currentUgi.getShortUserName(), daemonUsers)) {
 
         //Check if the user is the owner of the file.
-        String owner = metadata.getPermissionStatus().getUserName();
+        String owner = metadata.getOwner();
         if (!currentUgi.getShortUserName().equals(owner)) {
           throw new WasbAuthorizationException(
               String.format("user '%s' does not have the privilege to "
@@ -3522,16 +3456,16 @@ public class NativeAzureFileSystem extends FileSystem {
     }
 
     permission = applyUMask(permission,
-        metadata.isDir() ? UMaskApplyMode.ChangeExistingDirectory
+        metadata.isDirectory() ? UMaskApplyMode.ChangeExistingDirectory
             : UMaskApplyMode.ChangeExistingFile);
     if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
       // It's an implicit folder, need to materialize it.
       store.storeEmptyFolder(key, createPermissionStatus(permission));
-    } else if (!metadata.getPermissionStatus().getPermission().
+    } else if (!metadata.getPermission().
         equals(permission)) {
       store.changePermissionStatus(key, new PermissionStatus(
-          metadata.getPermissionStatus().getUserName(),
-          metadata.getPermissionStatus().getGroupName(),
+          metadata.getOwner(),
+          metadata.getGroup(),
           permission));
     }
   }
@@ -3579,10 +3513,10 @@ public class NativeAzureFileSystem extends FileSystem {
 
     PermissionStatus newPermissionStatus = new PermissionStatus(
         username == null ?
-            metadata.getPermissionStatus().getUserName() : username,
+            metadata.getOwner() : username,
         groupname == null ?
-            metadata.getPermissionStatus().getGroupName() : groupname,
-        metadata.getPermissionStatus().getPermission());
+            metadata.getGroup() : groupname,
+        metadata.getPermission());
     if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
       // It's an implicit folder, need to materialize it.
       store.storeEmptyFolder(key, newPermissionStatus);
@@ -3778,30 +3712,26 @@ public class NativeAzureFileSystem extends FileSystem {
             AZURE_TEMP_EXPIRY_DEFAULT) * 1000;
     // Go over all the blobs under the given root and look for blobs to
     // recover.
-    String priorLastKey = null;
-    do {
-      PartialListing listing = store.listAll(pathToKey(root), AZURE_LIST_ALL,
-          AZURE_UNBOUNDED_DEPTH, priorLastKey);
-
-      for (FileMetadata file : listing.getFiles()) {
-        if (!file.isDir()) { // We don't recover directory blobs
-          // See if this blob has a link in it (meaning it's a place-holder
-          // blob for when the upload to the temp blob is complete).
-          String link = store.getLinkInFileMetadata(file.getKey());
-          if (link != null) {
-            // It has a link, see if the temp blob it is pointing to is
-            // existent and old enough to be considered dangling.
-            FileMetadata linkMetadata = store.retrieveMetadata(link);
-            if (linkMetadata != null
-                && linkMetadata.getLastModified() >= cutoffForDangling) {
-              // Found one!
-              handler.handleFile(file, linkMetadata);
-            }
+    FileMetadata[] listing = store.list(pathToKey(root), AZURE_LIST_ALL,
+        AZURE_UNBOUNDED_DEPTH);
+
+    for (FileMetadata file : listing) {
+      if (!file.isDirectory()) { // We don't recover directory blobs
+        // See if this blob has a link in it (meaning it's a place-holder
+        // blob for when the upload to the temp blob is complete).
+        String link = store.getLinkInFileMetadata(file.getKey());
+        if (link != null) {
+          // It has a link, see if the temp blob it is pointing to is
+          // existent and old enough to be considered dangling.
+          FileMetadata linkMetadata = store.retrieveMetadata(link);
+          if (linkMetadata != null
+              && linkMetadata.getModificationTime() >= cutoffForDangling) {
+            // Found one!
+            handler.handleFile(file, linkMetadata);
           }
         }
       }
-      priorLastKey = listing.getPriorLastKey();
-    } while (priorLastKey != null);
+    }
   }
 
   /**
@@ -3888,7 +3818,7 @@ public class NativeAzureFileSystem extends FileSystem {
       meta = store.retrieveMetadata(key);
 
       if (meta != null) {
-        owner = meta.getPermissionStatus().getUserName();
+        owner = meta.getOwner();
         LOG.debug("Retrieved '{}' as owner for path - {}", owner, absolutePath);
       } else {
         // meta will be null if file/folder does not exist

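Most of the churn in NativeAzureFileSystem.java is the same transformation repeated: every do/while loop that accumulated PartialListing pages via getPriorLastKey() becomes a single store.list() call, with the FileNotFoundException translation concentrated in listWithErrorHandling(). Reduced to a sketch over a hypothetical store interface (none of the names below come from the patch), the before/after control flow looks like this:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    // Before/after shape of the listing call, shown against a made-up store
    // interface so only the control-flow change is visible.
    class ListingCallSketch {
      interface Page { String[] keys(); String nextMarker(); }
      interface PagedStore { Page page(String prefix, String marker) throws IOException; }
      interface FlatStore { String[] list(String prefix) throws IOException; }

      // Old pattern: accumulate pages until the continuation marker runs out,
      // as the removed loops over PartialListing.getPriorLastKey() did.
      static List<String> listPaged(PagedStore store, String prefix) throws IOException {
        List<String> all = new ArrayList<>();
        String marker = null;
        do {
          Page page = store.page(prefix, marker);
          for (String key : page.keys()) {
            all.add(key);
          }
          marker = page.nextMarker();
        } while (marker != null);
        return all;
      }

      // New pattern: the store returns the complete listing in one call; the
      // caller only adds not-found translation (cf. listWithErrorHandling).
      static String[] listFlat(FlatStore store, String prefix) throws IOException {
        return store.list(prefix);
      }
    }

The other recurring change is mechanical: since FileMetadata is now a FileStatus, the isDir()/getLength()/getLastModified()/getPermissionStatus() calls become isDirectory()/getLen()/getModificationTime()/getPermission()/getOwner()/getGroup().
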
http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d9568a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
index b67ab1b..36e3819 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
@@ -58,20 +58,21 @@ interface NativeFileSystemStore {
 
   boolean isAtomicRenameKey(String key);
 
+  /**
+   * Returns the file block size.  This is a fake value used for integration
+   * of the Azure store with Hadoop.
+   * @return The file block size.
+   */
+  long getHadoopBlockSize();
+
   void storeEmptyLinkFile(String key, String tempBlobKey,
       PermissionStatus permissionStatus) throws AzureException;
 
   String getLinkInFileMetadata(String key) throws AzureException;
 
-  PartialListing list(String prefix, final int maxListingCount,
+  FileMetadata[] list(String prefix, final int maxListingCount,
       final int maxListingDepth) throws IOException;
 
-  PartialListing list(String prefix, final int maxListingCount,
-      final int maxListingDepth, String priorLastKey) throws IOException;
-
-  PartialListing listAll(String prefix, final int maxListingCount,
-      final int maxListingDepth, String priorLastKey) throws IOException;
-
   void changePermissionStatus(String key, PermissionStatus newPermission)
       throws AzureException;
 

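The store interface now also owns the advisory block size via getHadoopBlockSize(). Azure blobs have no real block boundary, so the value only affects what FileStatus.getBlockSize() reports (and therefore how split sizes are computed downstream); it is read once when the store is initialized instead of again in the filesystem. A hedged illustration of that wiring, assuming the configuration key behind HADOOP_BLOCK_SIZE_PROPERTY_NAME is the pre-existing fs.azure.block.size property (the constant's value is not shown in this patch):

    import org.apache.hadoop.conf.Configuration;

    // Sketch of the store-side block size wiring: read once at initialization,
    // then reported through getHadoopBlockSize() for every FileMetadata built.
    class BlockSizeSketch {
      // Assumed key; the patch only references HADOOP_BLOCK_SIZE_PROPERTY_NAME.
      private static final String BLOCK_SIZE_KEY = "fs.azure.block.size";
      private static final long DEFAULT_BLOCK_SIZE = 512 * 1024 * 1024L;

      private final long hadoopBlockSize;

      BlockSizeSketch(Configuration conf) {
        this.hadoopBlockSize = conf.getLong(BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
      }

      long getHadoopBlockSize() {
        return hadoopBlockSize;   // a fake value; Azure blobs have no blocks
      }
    }

Centralizing the value in the store keeps every FileMetadata constructed with the same block size, whether it comes from retrieveMetadata(), list(), or the directory placeholders built during listing.
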
http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d9568a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PartialListing.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PartialListing.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PartialListing.java
deleted file mode 100644
index 4a80d2e..0000000
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PartialListing.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azure;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * <p>
- * Holds information on a directory listing for a {@link NativeFileSystemStore}.
- * This includes the {@link FileMetadata files} and directories (their names)
- * contained in a directory.
- * </p>
- * <p>
- * This listing may be returned in chunks, so a <code>priorLastKey</code> is
- * provided so that the next chunk may be requested.
- * </p>
- *
- * @see NativeFileSystemStore#list(String, int, String)
- */
-@InterfaceAudience.Private
-class PartialListing {
-
-  private final String priorLastKey;
-  private final FileMetadata[] files;
-  private final String[] commonPrefixes;
-
-  public PartialListing(String priorLastKey, FileMetadata[] files,
-      String[] commonPrefixes) {
-    this.priorLastKey = priorLastKey;
-    this.files = files;
-    this.commonPrefixes = commonPrefixes;
-  }
-
-  public FileMetadata[] getFiles() {
-    return files;
-  }
-
-  public String[] getCommonPrefixes() {
-    return commonPrefixes;
-  }
-
-  public String getPriorLastKey() {
-    return priorLastKey;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d9568a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestListPerformance.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestListPerformance.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestListPerformance.java
new file mode 100644
index 0000000..e7a3fa8
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestListPerformance.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+import org.junit.Assume;
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.azure.integration.AbstractAzureScaleTest;
+import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+
+/**
+ * Test list performance.
+ */
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+
+public class ITestListPerformance extends AbstractAzureScaleTest {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ITestListPerformance.class);
+
+  private static final Path TEST_DIR_PATH = new Path(
+      "DirectoryWithManyFiles");
+
+  private static final int NUMBER_OF_THREADS = 10;
+  private static final int NUMBER_OF_FILES_PER_THREAD = 1000;
+
+  private int threads;
+
+  private int filesPerThread;
+
+  private int expectedFileCount;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    Configuration conf = getConfiguration();
+    // fail fast
+    threads = AzureTestUtils.getTestPropertyInt(conf,
+        "fs.azure.scale.test.list.performance.threads", NUMBER_OF_THREADS);
+    filesPerThread = AzureTestUtils.getTestPropertyInt(conf,
+        "fs.azure.scale.test.list.performance.files", NUMBER_OF_FILES_PER_THREAD);
+    expectedFileCount = threads * filesPerThread;
+    LOG.info("Threads = {}, Files per Thread = {}, expected files = {}",
+        threads, filesPerThread, expectedFileCount);
+    conf.set("fs.azure.io.retry.max.retries", "1");
+    conf.set("fs.azure.delete.threads", "16");
+    createTestAccount();
+  }
+
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    return AzureBlobStorageTestAccount.create(
+        "itestlistperformance",
+        EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
+        null,
+        true);
+  }
+
+  @Test
+  public void test_0101_CreateDirectoryWithFiles() throws Exception {
+    Assume.assumeFalse("Test path exists; skipping", fs.exists(TEST_DIR_PATH));
+
+    ExecutorService executorService = Executors.newFixedThreadPool(threads);
+    CloudBlobContainer container = testAccount.getRealContainer();
+
+    final String basePath = (fs.getWorkingDirectory().toUri().getPath() + "/" + TEST_DIR_PATH + "/").substring(1);
+
+    ArrayList<Callable<Integer>> tasks = new ArrayList<>(threads);
+    fs.mkdirs(TEST_DIR_PATH);
+    ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+    for (int i = 0; i < threads; i++) {
+      tasks.add(
+          new Callable<Integer>() {
+            public Integer call() {
+              int written = 0;
+              for (int j = 0; j < filesPerThread; j++) {
+                String blobName = basePath + UUID.randomUUID().toString();
+                try {
+                  CloudBlockBlob blob = container.getBlockBlobReference(
+                      blobName);
+                  blob.uploadText("");
+                  written++;
+                } catch (Exception e) {
+                  LOG.error("Failed to write {}", blobName, e);
+                  break;
+                }
+              }
+              LOG.info("Thread completed with {} files written", written);
+              return written;
+            }
+          }
+      );
+    }
+
+    List<Future<Integer>> futures = executorService.invokeAll(tasks,
+        getTestTimeoutMillis(), TimeUnit.MILLISECONDS);
+    long elapsedMs = timer.elapsedTimeMs();
+    LOG.info("time to create files: {} millis", elapsedMs);
+
+    for (Future<Integer> future : futures) {
+      assertTrue("Future timed out", future.isDone());
+      assertEquals("Future did not write all of its files",
+          filesPerThread, future.get().intValue());
+    }
+  }
+
+  @Test
+  public void test_0200_ListStatusPerformance() throws Exception {
+    ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+    FileStatus[] fileList = fs.listStatus(TEST_DIR_PATH);
+    long elapsedMs = timer.elapsedTimeMs();
+    LOG.info(String.format(
+        "files=%1$d, elapsedMs=%2$d",
+        fileList.length,
+        elapsedMs));
+    Map<Path, FileStatus> foundInList = new HashMap<>(expectedFileCount);
+
+    for (FileStatus fileStatus : fileList) {
+      foundInList.put(fileStatus.getPath(), fileStatus);
+      LOG.info("{}: {}", fileStatus.getPath(),
+          fileStatus.isDirectory() ? "dir" : "file");
+    }
+    assertEquals("Mismatch between expected files and actual",
+        expectedFileCount, fileList.length);
+
+
+    // now do a listFiles() recursive
+    ContractTestUtils.NanoTimer initialStatusCallTimer
+        = new ContractTestUtils.NanoTimer();
+    RemoteIterator<LocatedFileStatus> listing
+        = fs.listFiles(TEST_DIR_PATH, true);
+    long initialListTime = initialStatusCallTimer.elapsedTimeMs();
+    timer = new ContractTestUtils.NanoTimer();
+    while (listing.hasNext()) {
+      FileStatus fileStatus = listing.next();
+      Path path = fileStatus.getPath();
+      FileStatus removed = foundInList.remove(path);
+      assertNotNull("Did not find " + path + " in the previous listing",
+          removed);
+    }
+    elapsedMs = timer.elapsedTimeMs();
+    LOG.info("time for listFiles() initial call: {} millis;"
+        + " time to iterate: {} millis", initialListTime, elapsedMs);
+    assertEquals("Not all files from listStatus() were found in listFiles()",
+        0, foundInList.size());
+
+  }
+
+  @Test
+  public void test_0300_BulkDeletePerformance() throws Exception {
+    ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
+    fs.delete(TEST_DIR_PATH, true);
+    long elapsedMs = timer.elapsedTimeMs();
+    LOG.info("time for delete(): {} millis; {} nanos per file",
+        elapsedMs, timer.nanosPerOperation(expectedFileCount));
+  }
+}
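
The thread and file counts used by the test above are read from the Hadoop
configuration rather than hard-coded, so the scale of a run can be changed
without touching the code. A minimal sketch (not part of the patch) of how
those knobs might be set; the property names come from setUp() above, while
the class name and values are arbitrary examples:

    import org.apache.hadoop.conf.Configuration;

    public class ListPerfScaleConfigSketch {
      public static Configuration scaledUp() {
        Configuration conf = new Configuration();
        // Property names as read by ITestListPerformance#setUp().
        conf.setInt("fs.azure.scale.test.list.performance.threads", 20);
        conf.setInt("fs.azure.scale.test.list.performance.files", 2000);
        // The test will then create and expect 20 * 2000 = 40000 blobs.
        return conf;
      }
    }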


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[27/50] hadoop git commit: YARN-8301. Added YARN service upgrade instructions. Contributed by Chandni Singh

Posted by in...@apache.org.
YARN-8301.  Added YARN service upgrade instructions.
            Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1622a4b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1622a4b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1622a4b8

Branch: refs/heads/HADOOP-15461
Commit: 1622a4b810eaf9c4fe9f9ad6bef6b49db7bec16f
Parents: 347c955
Author: Eric Yang <ey...@apache.org>
Authored: Fri Jul 20 19:46:35 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Fri Jul 20 19:46:35 2018 -0400

----------------------------------------------------------------------
 .../src/site/markdown/yarn-service/Overview.md  |   4 +-
 .../markdown/yarn-service/ServiceUpgrade.md     | 197 +++++++++++++++++++
 2 files changed, 198 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1622a4b8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
index 8e2bf9a..041b0ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
@@ -56,6 +56,4 @@ The benefits of combining these workloads are two-fold:
 * [Registry DNS](RegistryDNS.html): Deep dives into the Registry DNS internals.
 * [Examples](Examples.html): List some example service definitions (`Yarnfile`).
 * [Configurations](Configurations.html): Describes how to configure the custom services on YARN.
-
-
- 
+* [Service Upgrade](ServiceUpgrade.html): Describes how to upgrade a YARN service which is an experimental feature.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1622a4b8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceUpgrade.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceUpgrade.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceUpgrade.md
new file mode 100644
index 0000000..839be22
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceUpgrade.md
@@ -0,0 +1,197 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+# Service Upgrade (Experimental Feature - Tech Preview)
+
+YARN Service provides a way of upgrading/downgrading long-running applications without
+shutting down the application, which minimizes downtime during the process. This is
+an experimental feature and is currently not enabled by default.
+
+## Overview
+
+Upgrading a YARN service is a three-step (or two-step, when auto-finalization of
+the upgrade is chosen) process:
+
+1. Initiate service upgrade.\
+This step involves providing the service spec of the newer version of the service.
+Once the service upgrade is initiated, the state of the service changes to
+`UPGRADING`.
+
+2. Upgrade component instances.\
+This step involves triggering the upgrade of individual component instances.
+By providing an API to upgrade at the instance level, users can orchestrate the
+upgrade of the entire service in whatever order is relevant for the service.\
+In addition, there are APIs to upgrade multiple instances, all instances of a
+component, and all instances of multiple components.
+
+3. Finalize upgrade.\
+This step involves finalizing the upgrade. With an explicit step to finalize the
+upgrade, users have a chance to cancel the upgrade currently in progress. When the
+user chooses to cancel, the service will make a best effort to revert to the
+previous version.\
+\
+When the upgrade is finalized, the old service definition is
+overwritten by the new service definition and the service state changes to `STABLE`.\
+A service can be auto-finalized when the upgrade is initiated with the
+`-autoFinalize` option. With auto-finalization, when all the component-instances of
+the service have been upgraded, finalization will be performed automatically by the
+service framework.\
+\
+**NOTE**: Cancellation of an upgrade is not implemented yet.
+
+## Upgrade Example
+This example shows the upgrade of a sleeper service. Below is the sleeper service
+definition:
+
+```
+{
+  "name": "sleeper-service",
+  "components" :
+    [
+      {
+        "name": "sleeper",
+        "version": "1.0.0",
+        "number_of_containers": 1,
+        "launch_command": "sleep 900000",
+        "resource": {
+          "cpus": 1,
+          "memory": "256"
+       }
+      }
+    ]
+}
+```
+Assuming the user launched an instance of the sleeper service named `my-sleeper`:
+```
+{
+  "components":
+    [
+      {
+        "configuration": {...},
+        "containers":
+          [
+            {
+              "bare_host": "0.0.0.0",
+              "component_instance_name": "sleeper-0",
+              "hostname": "example.local",
+              "id": "container_1531508836237_0002_01_000002",
+              "ip": "0.0.0.0",
+              "launch_time": 1531941023675,
+              "state": "READY"
+            },
+            {
+              "bare_host": "0.0.0.0",
+              "component_instance_name": "sleeper-1",
+              "hostname": "example.local",
+              "id": "container_1531508836237_0002_01_000003",
+              "ip": "0.0.0.0",
+              "launch_time": 1531941024680,
+              "state": "READY"
+            }
+          ],
+        "dependencies": [],
+        "launch_command": "sleep 900000",
+        "name": "sleeper",
+        "number_of_containers": 2,
+        "quicklinks": [],
+        "resource": {...},
+        "restart_policy": "ALWAYS",
+        "run_privileged_container": false,
+        "state": "STABLE"
+      }
+    ],
+  "configuration": {...},
+  "id": "application_1531508836237_0002",
+  "kerberos_principal": {},
+  "lifetime": -1,
+  "name": "my-sleeper",
+  "quicklinks": {},
+  "state": "STABLE",
+  "version": "1.0.0"
+}
+```
+
+### Enable Service Upgrade
+Below is the configuration in `yarn-site.xml` required for enabling service
+upgrade.
+
+```
+  <property>
+    <name>yarn.service.upgrade.enabled</name>
+    <value>true</value>
+  </property>
+```
+
+### Initiate Upgrade
+The user can initiate an upgrade using the command below:
+```
+yarn app -upgrade ${service_name} -initiate ${path_to_new_service_def_file} [-autoFinalize]
+```
+
+e.g. To upgrade `my-sleeper` to sleep for *1200000* instead of *900000*, the user
+can upgrade the service to version 1.0.1. Below is the service definition for
+version 1.0.1 of sleeper-service:
+
+```
+{
+  "components" :
+    [
+      {
+        "name": "sleeper",
+        "version": "1.0.1",
+        "number_of_containers": 1,
+        "launch_command": "sleep 1200000",
+        "resource": {
+          "cpus": 1,
+          "memory": "256"
+        }
+      }
+    ]
+}
+```
+The command below initiates the upgrade to version 1.0.1.
+```
+yarn app -upgrade my-sleeper -initiate sleeper_v101.json
+```
+
+### Upgrade Instance
+The user can upgrade a component instance using the command below:
+```
+yarn app -upgrade ${service_name} -instances ${comma_separated_list_of_instance_names}
+```
+e.g. The command below upgrades the `sleeper-0` and `sleeper-1` instances of `my-sleeper`:
+```
+yarn app -upgrade my-sleeper -instances sleeper-0,sleeper-1
+```
+
+### Upgrade Component
+The user can upgrade a component, that is, all the instances of a component, with
+one command:
+```
+yarn app -upgrade ${service_name} -components ${comma_separated_list_of_component_names}
+```
+e.g. The command below upgrades all the instances of the `sleeper` component of `my-sleeper`:
+```
+yarn app -upgrade my-sleeper -components sleeper
+```
+
+### Finalize Upgrade
+The user must finalize the upgrade using the command below (since `-autoFinalize` was not specified at initiation):
+```
+yarn app -upgrade ${service_name} -finalize
+```
+e.g. The command below finalizes the upgrade of `my-sleeper`:
+```
+yarn app -upgrade my-sleeper -finalize
+```
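
The same switch shown in the yarn-site.xml snippet above can also be set
programmatically, for example in tests or an embedded setup. A minimal sketch,
not part of the patch; only the property name is taken from this document and
everything else is illustrative:

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class EnableServiceUpgradeSketch {
      public static YarnConfiguration withUpgradeEnabled() {
        // Same property as the yarn-site.xml snippet in ServiceUpgrade.md.
        YarnConfiguration conf = new YarnConfiguration();
        conf.setBoolean("yarn.service.upgrade.enabled", true);
        return conf;
      }
    }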


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[16/50] hadoop git commit: HDDS-259. Implement ContainerReportPublisher and NodeReportPublisher. Contributed by Nanda kumar.

Posted by in...@apache.org.
HDDS-259. Implement ContainerReportPublisher and NodeReportPublisher. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68b57ad3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68b57ad3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68b57ad3

Branch: refs/heads/HADOOP-15461
Commit: 68b57ad32cb0978ad5cd20b5fdc821f087a2c9dc
Parents: e9c44ec
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri Jul 20 09:07:58 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Fri Jul 20 09:12:48 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  | 26 ++++++++++++++--
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  5 ---
 .../apache/hadoop/ozone/OzoneConfigKeys.java    |  5 ---
 .../common/src/main/resources/ozone-default.xml | 26 +++++++++++++---
 .../apache/hadoop/hdds/scm/HddsServerUtil.java  | 13 ++++----
 .../report/CommandStatusReportPublisher.java    | 24 ++++++++++++---
 .../common/report/ContainerReportPublisher.java | 25 ++++++++++++---
 .../common/report/NodeReportPublisher.java      | 32 ++++++++++++++++++--
 .../common/report/ReportPublisher.java          | 14 +++++++--
 .../common/report/TestReportPublisher.java      | 11 ++-----
 .../scm/container/closer/ContainerCloser.java   | 12 ++++----
 .../container/closer/TestContainerCloser.java   |  8 ++---
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 17 +++++------
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |  5 +--
 .../ozone/TestStorageContainerManager.java      |  8 ++---
 .../hadoop/ozone/scm/node/TestQueryNode.java    |  5 ++-
 16 files changed, 162 insertions(+), 74 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 8b449fb..0283615 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -17,15 +17,35 @@
  */
 package org.apache.hadoop.hdds;
 
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 /**
- * Config class for HDDS.
+ * This class contains constants for configuration keys and default values
+ * used in hdds.
  */
 public final class HddsConfigKeys {
+
+  /**
+   * Do not instantiate.
+   */
   private HddsConfigKeys() {
   }
+
+  public static final String HDDS_HEARTBEAT_INTERVAL =
+      "hdds.heartbeat.interval";
+  public static final String HDDS_HEARTBEAT_INTERVAL_DEFAULT =
+      "30s";
+
+  public static final String HDDS_NODE_REPORT_INTERVAL =
+      "hdds.node.report.interval";
+  public static final String HDDS_NODE_REPORT_INTERVAL_DEFAULT =
+      "60s";
+
+  public static final String HDDS_CONTAINER_REPORT_INTERVAL =
+      "hdds.container.report.interval";
+  public static final String HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT =
+      "60s";
+
   public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL =
       "hdds.command.status.report.interval";
   public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT =
-      ScmConfigKeys.OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT;
+      "60s";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 46eb8aa..71184cf 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -156,11 +156,6 @@ public final class ScmConfigKeys {
       "ozone.scm.handler.count.key";
   public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10;
 
-  public static final String OZONE_SCM_HEARTBEAT_INTERVAL =
-      "ozone.scm.heartbeat.interval";
-  public static final String OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT =
-      "30s";
-
   public static final String OZONE_SCM_DEADNODE_INTERVAL =
       "ozone.scm.dead.node.interval";
   public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index fc10fbb..0273677 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -200,11 +200,6 @@ public final class OzoneConfigKeys {
   public static final int
       OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10;
 
-  public static final String OZONE_CONTAINER_REPORT_INTERVAL =
-      "ozone.container.report.interval";
-  public static final String OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT =
-      "60s";
-
   public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
       = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
   public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index da3870e..5a1d26a 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -153,13 +153,29 @@
     <description>The timeout duration for ratis server request.</description>
   </property>
   <property>
-    <name>ozone.container.report.interval</name>
+    <name>hdds.node.report.interval</name>
+    <value>60000ms</value>
+    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
+    <description>Time interval for the datanode to send its node report. Each
+      datanode periodically sends a node report to SCM. The unit can be
+      defined with a postfix (ns,ms,s,m,h,d).</description>
+  </property>
+  <property>
+    <name>hdds.container.report.interval</name>
     <value>60000ms</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>
     <description>Time interval of the datanode to send container report. Each
-      datanode periodically send container report upon receive
-      sendContainerReport from SCM. Unit could be defined with
-      postfix (ns,ms,s,m,h,d)</description>
+      datanode periodically sends a container report to SCM. The unit can be
+      defined with a postfix (ns,ms,s,m,h,d).</description>
+  </property>
+  <property>
+    <name>hdds.command.status.report.interval</name>
+    <value>60000ms</value>
+    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
+    <description>Time interval for the datanode to report the status of command
+      execution. Each datanode periodically sends the execution status of
+      commands received from SCM back to SCM. The unit can be defined with a
+      postfix (ns,ms,s,m,h,d).</description>
   </property>
   <!--Ozone Settings-->
   <property>
@@ -677,7 +693,7 @@
     </description>
   </property>
   <property>
-    <name>ozone.scm.heartbeat.interval</name>
+    <name>hdds.heartbeat.interval</name>
     <value>30s</value>
     <tag>OZONE, MANAGEMENT</tag>
     <description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
index cc7adbf..a8b919d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
@@ -29,13 +29,15 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_HEARTBEAT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_DEADNODE_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_DEADNODE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT;
@@ -181,9 +183,8 @@ public final class HddsServerUtil {
    * @return - HB interval in seconds.
    */
   public static long getScmHeartbeatInterval(Configuration conf) {
-    return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL,
-        ScmConfigKeys.OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT,
-        TimeUnit.SECONDS);
+    return conf.getTimeDuration(HDDS_HEARTBEAT_INTERVAL,
+        HDDS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
   }
 
   /**
@@ -225,7 +226,7 @@ public final class HddsServerUtil {
       sanitizeUserArgs(staleNodeIntervalMs, heartbeatIntervalMs, 3, 1000);
     } catch (IllegalArgumentException ex) {
       LOG.error("Stale Node Interval MS is cannot be honored due to " +
-          "mis-configured {}. ex:  {}", OZONE_SCM_HEARTBEAT_INTERVAL, ex);
+          "mis-configured {}. ex:  {}", HDDS_HEARTBEAT_INTERVAL, ex);
       throw ex;
     }
     return staleNodeIntervalMs;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java
index ca5174a..3898d15 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java
@@ -19,12 +19,20 @@ package org.apache.hadoop.ozone.container.common.report;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
 
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_COMMAND_STATUS_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT;
+
 /**
  * Publishes CommandStatusReport which will be sent to SCM as part of
  * heartbeat. CommandStatusReport consist of the following information:
@@ -42,9 +50,17 @@ public class CommandStatusReportPublisher extends
   protected long getReportFrequency() {
     if (cmdStatusReportInterval == -1) {
       cmdStatusReportInterval = getConf().getTimeDuration(
-          HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL,
-          HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT,
+          HDDS_COMMAND_STATUS_REPORT_INTERVAL,
+          HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT,
           TimeUnit.MILLISECONDS);
+
+      long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval(
+          getConf());
+
+      Preconditions.checkState(
+          heartbeatFrequency < cmdStatusReportInterval,
+          HDDS_COMMAND_STATUS_REPORT_INTERVAL +
+              " cannot be configured lower than heartbeat frequency.");
     }
     return cmdStatusReportInterval;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java
index ea2b987..3e73bb4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java
@@ -17,13 +17,20 @@
 
 package org.apache.hadoop.ozone.container.common.report;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
 
+import java.io.IOException;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT;
+
 
 /**
  * Publishes ContainerReport which will be sent to SCM as part of heartbeat.
@@ -49,9 +56,17 @@ public class ContainerReportPublisher extends
   protected long getReportFrequency() {
     if (containerReportInterval == null) {
       containerReportInterval = getConf().getTimeDuration(
-          OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL,
-          OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT,
+          HDDS_CONTAINER_REPORT_INTERVAL,
+          HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT,
           TimeUnit.MILLISECONDS);
+
+      long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval(
+          getConf());
+
+      Preconditions.checkState(
+          heartbeatFrequency < containerReportInterval,
+          HDDS_CONTAINER_REPORT_INTERVAL +
+              " cannot be configured lower than heartbeat frequency.");
     }
     // Add a random delay (0~30s) on top of the container report
     // interval (60s) so that the SCM is not overwhelmed by the container reports
@@ -64,7 +79,7 @@ public class ContainerReportPublisher extends
   }
 
   @Override
-  protected ContainerReportsProto getReport() {
-    return ContainerReportsProto.getDefaultInstance();
+  protected ContainerReportsProto getReport() throws IOException {
+    return getContext().getParent().getContainer().getContainerReport();
   }
 }
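
With the checks above, the report intervals are now validated against the
heartbeat interval: the heartbeat has to stay strictly shorter than each report
interval. A minimal configuration sketch, not part of the patch; the key names
come from HddsConfigKeys in this commit and the values are arbitrary examples:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.hdds.HddsConfigKeys;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public class ReportIntervalConfigSketch {
      public static OzoneConfiguration example() {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Heartbeat every 30 seconds ...
        conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL,
            30, TimeUnit.SECONDS);
        // ... and reports at most once a minute, keeping every report interval
        // above the heartbeat interval, as the new Preconditions checks expect.
        conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL,
            60, TimeUnit.SECONDS);
        conf.setTimeDuration(HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL,
            60, TimeUnit.SECONDS);
        conf.setTimeDuration(HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL,
            60, TimeUnit.SECONDS);
        return conf;
      }
    }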

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java
index 704b1f5..b98cba4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java
@@ -17,8 +17,18 @@
 
 package org.apache.hadoop.ozone.container.common.report;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_NODE_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_NODE_REPORT_INTERVAL_DEFAULT;
 
 /**
  * Publishes NodeReport which will be sent to SCM as part of heartbeat.
@@ -28,13 +38,29 @@ import org.apache.hadoop.hdds.protocol.proto
  */
 public class NodeReportPublisher extends ReportPublisher<NodeReportProto> {
 
+  private Long nodeReportInterval;
+
   @Override
   protected long getReportFrequency() {
-    return 90000L;
+    if (nodeReportInterval == null) {
+      nodeReportInterval = getConf().getTimeDuration(
+          HDDS_NODE_REPORT_INTERVAL,
+          HDDS_NODE_REPORT_INTERVAL_DEFAULT,
+          TimeUnit.MILLISECONDS);
+
+      long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval(
+          getConf());
+
+      Preconditions.checkState(
+          heartbeatFrequency < nodeReportInterval,
+          HDDS_NODE_REPORT_INTERVAL +
+              " cannot be configured lower than heartbeat frequency.");
+    }
+    return nodeReportInterval;
   }
 
   @Override
-  protected NodeReportProto getReport() {
-    return NodeReportProto.getDefaultInstance();
+  protected NodeReportProto getReport() throws IOException {
+    return getContext().getParent().getContainer().getNodeReport();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java
index 105f073..e3910db 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java
@@ -23,7 +23,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ozone.container.common.statemachine
     .DatanodeStateMachine.DatanodeStates;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
@@ -34,6 +37,9 @@ import java.util.concurrent.TimeUnit;
 public abstract class ReportPublisher<T extends GeneratedMessage>
     implements Configurable, Runnable {
 
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ReportPublisher.class);
+
   private Configuration config;
   private StateContext context;
   private ScheduledExecutorService executor;
@@ -76,7 +82,11 @@ public abstract class ReportPublisher<T extends GeneratedMessage>
    * Generates and publishes the report to datanode state context.
    */
   private void publishReport() {
-    context.addReport(getReport());
+    try {
+      context.addReport(getReport());
+    } catch (IOException e) {
+      LOG.error("Exception while publishing report.", e);
+    }
   }
 
   /**
@@ -91,7 +101,7 @@ public abstract class ReportPublisher<T extends GeneratedMessage>
    *
    * @return datanode report
    */
-  protected abstract T getReport();
+  protected abstract T getReport() throws IOException;
 
   /**
    * Returns {@link StateContext}.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
index d4db55b..a0db2e8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
@@ -180,14 +180,9 @@ public class TestReportPublisher {
 
   @Test
   public void testAddingReportToHeartbeat() {
-    Configuration conf = new OzoneConfiguration();
-    ReportPublisherFactory factory = new ReportPublisherFactory(conf);
-    ReportPublisher nodeReportPublisher = factory.getPublisherFor(
-        NodeReportProto.class);
-    ReportPublisher containerReportPubliser = factory.getPublisherFor(
-        ContainerReportsProto.class);
-    GeneratedMessage nodeReport = nodeReportPublisher.getReport();
-    GeneratedMessage containerReport = containerReportPubliser.getReport();
+    GeneratedMessage nodeReport = NodeReportProto.getDefaultInstance();
+    GeneratedMessage containerReport = ContainerReportsProto
+        .getDefaultInstance();
     SCMHeartbeatRequestProto.Builder heartbeatBuilder =
         SCMHeartbeatRequestProto.newBuilder();
     heartbeatBuilder.setDatanodeDetails(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java
index eb591be..ba691ca 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java
@@ -37,10 +37,10 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT;
 
 /**
  * A class that manages closing of containers. This allows transition from a
@@ -75,8 +75,8 @@ public class ContainerCloser {
     this.threadRunCount = new AtomicInteger(0);
     this.isRunning = new AtomicBoolean(false);
     this.reportInterval = this.configuration.getTimeDuration(
-        OZONE_CONTAINER_REPORT_INTERVAL,
-        OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
+        HDDS_CONTAINER_REPORT_INTERVAL,
+        HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
     Preconditions.checkState(this.reportInterval > 0,
         "report interval has to be greater than 0");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
index 74238a7..f7863bc 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
@@ -44,6 +44,8 @@ import java.io.File;
 import java.io.IOException;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_CONTAINER_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_CONTAINER_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
@@ -52,8 +54,6 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent
     .CREATE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent
     .CREATED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CONTAINER_REPORT_INTERVAL;
 
 /**
  * Test class for Closing Container.
@@ -72,7 +72,7 @@ public class TestContainerCloser {
     configuration = SCMTestUtils.getConf();
     size = configuration.getLong(OZONE_SCM_CONTAINER_SIZE_GB,
         OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024 * 1024 * 1024;
-    configuration.setTimeDuration(OZONE_CONTAINER_REPORT_INTERVAL,
+    configuration.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL,
         1, TimeUnit.SECONDS);
     testDir = GenericTestUtils
         .getTestDir(TestContainerMapping.class.getSimpleName());
@@ -137,7 +137,7 @@ public class TestContainerCloser {
     // second report is discarded by the system if it lands in the 3 * report
     // frequency window.
 
-    configuration.setTimeDuration(OZONE_CONTAINER_REPORT_INTERVAL, 1,
+    configuration.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1,
         TimeUnit.SECONDS);
 
     ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index cefd179..5275992 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -58,11 +58,10 @@ import java.util.concurrent.TimeoutException;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_DEADNODE_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_STALENODE_INTERVAL;
@@ -359,7 +358,7 @@ public class TestNodeManager {
     final int interval = 100;
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
         MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
 
     // This should be 5 times more than  OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL
     // and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL
@@ -388,7 +387,7 @@ public class TestNodeManager {
     final int interval = 100;
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
         TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS);
 
     // This should be 5 times more than  OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL
     // and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL
@@ -413,7 +412,7 @@ public class TestNodeManager {
     OzoneConfiguration conf = getConf();
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
         MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
 
@@ -551,7 +550,7 @@ public class TestNodeManager {
     OzoneConfiguration conf = getConf();
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
         MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
 
@@ -729,7 +728,7 @@ public class TestNodeManager {
     OzoneConfiguration conf = getConf();
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
         MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
 
@@ -820,7 +819,7 @@ public class TestNodeManager {
     OzoneConfiguration conf = getConf();
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
         MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1,
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1,
         SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000,
         MILLISECONDS);
@@ -985,7 +984,7 @@ public class TestNodeManager {
 
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
         MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 3ef74b0..9b7e399 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -59,6 +59,7 @@ import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
     .HEALTHY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
@@ -392,11 +393,11 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
 
     private void configureSCMheartbeat() {
       if (hbInterval.isPresent()) {
-        conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
+        conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL,
             hbInterval.get(), TimeUnit.MILLISECONDS);
 
       } else {
-        conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
+        conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL,
             DEFAULT_HB_INTERVAL_MS,
             TimeUnit.MILLISECONDS);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index cc367b3..7ca5fa1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.ozone;
 
-import static org.junit.Assert.fail;
 import java.io.IOException;
 
 import org.apache.commons.lang3.RandomUtils;
@@ -68,6 +67,9 @@ import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 import org.apache.hadoop.test.GenericTestUtils;
 
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
+import static org.junit.Assert.fail;
+
 /**
  * Test class that exercises the StorageContainerManager.
  */
@@ -186,9 +188,7 @@ public class TestStorageContainerManager {
   public void testBlockDeletionTransactions() throws Exception {
     int numKeys = 5;
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
-        5,
-        TimeUnit.SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 5, TimeUnit.SECONDS);
     conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
         3000,
         TimeUnit.MILLISECONDS);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
index 22528e4..a4fcd53 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
@@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit;
 
 import static java.util.concurrent.TimeUnit.SECONDS;
 
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
@@ -38,8 +39,6 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_DEADNODE_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_STALENODE_INTERVAL;
@@ -61,7 +60,7 @@ public class TestQueryNode {
 
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
         interval, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
 

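For reference, the publisher contract exercised by this change is small: a
subclass supplies a report frequency and the report payload, and ReportPublisher
handles scheduling and hands the report to the StateContext (getReport() may now
throw IOException, which publishReport() logs). A hypothetical, minimal subclass
sketch, not part of the patch; the class name and the fixed frequency are
illustrative only:

    import java.io.IOException;

    import org.apache.hadoop.hdds.protocol.proto
        .StorageContainerDatanodeProtocolProtos.NodeReportProto;
    import org.apache.hadoop.ozone.container.common.report.ReportPublisher;

    public class FixedRateNodeReportPublisher
        extends ReportPublisher<NodeReportProto> {

      @Override
      protected long getReportFrequency() {
        // Real publishers read this from configuration and validate it against
        // the heartbeat interval, as the publishers in this patch do.
        return 60_000L;
      }

      @Override
      protected NodeReportProto getReport() throws IOException {
        // Real publishers build the report from the datanode's container state.
        return NodeReportProto.getDefaultInstance();
      }
    }
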

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[23/50] hadoop git commit: HDDS-275. Add message output for succeeded -deleteVolume CLI. Contributed by Nilotpal Nandi.

Posted by in...@apache.org.
HDDS-275. Add message output for succeeded -deleteVolume CLI. Contributed by Nilotpal Nandi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7ae5567
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7ae5567
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7ae5567

Branch: refs/heads/HADOOP-15461
Commit: c7ae55675ed56cb18266425c02674a5a87561e0c
Parents: 6837121
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri Jul 20 13:12:07 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Fri Jul 20 13:12:53 2018 -0700

----------------------------------------------------------------------
 .../test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java  | 2 ++
 .../hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java       | 1 +
 2 files changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ae5567/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 8f53049..573f097 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -249,6 +249,8 @@ public class TestOzoneShell {
     String[] args = new String[] {"-deleteVolume", url + "/" + volumeName,
         "-root"};
     assertEquals(0, ToolRunner.run(shell, args));
+    String output = out.toString();
+    assertTrue(output.contains("Volume " + volumeName + " is deleted"));
 
     // verify if volume has been deleted
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ae5567/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java
index d6facf6..2df788a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java
@@ -67,5 +67,6 @@ public class DeleteVolumeHandler extends Handler {
     }
 
     client.getObjectStore().deleteVolume(volumeName);
+    System.out.printf("Volume %s is deleted%n", volumeName);
   }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[18/50] hadoop git commit: YARN-6964. Fair scheduler misuses Resources operations. (Daniel Templeton and Szilard Nemeth via Haibo Chen)

Posted by in...@apache.org.
YARN-6964. Fair scheduler misuses Resources operations. (Daniel Templeton and Szilard Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a6bb840
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a6bb840
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a6bb840

Branch: refs/heads/HADOOP-15461
Commit: 8a6bb8409c2dc695c0ffc70df0528d7f8bd5d795
Parents: 5c19ee3
Author: Haibo Chen <ha...@apache.org>
Authored: Fri Jul 20 10:46:28 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Fri Jul 20 10:46:28 2018 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/util/resource/Resources.java    | 20 +++++-
 .../scheduler/SchedulerApplicationAttempt.java  | 11 +--
 .../allocator/RegularContainerAllocator.java    |  4 +-
 .../scheduler/fair/AllocationConfiguration.java |  2 +-
 .../scheduler/fair/FSAppAttempt.java            |  3 +-
 .../scheduler/fair/FSLeafQueue.java             |  9 +--
 .../fair/policies/FairSharePolicy.java          | 76 ++++++++++----------
 .../TestSchedulerApplicationAttempt.java        | 58 +++++++++++++++
 .../scheduler/fair/FakeSchedulable.java         | 22 +++---
 9 files changed, 140 insertions(+), 65 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a6bb840/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index db0f980..8636577 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -242,7 +242,7 @@ public class Resources {
   public static boolean isNone(Resource other) {
     return NONE.equals(other);
   }
-  
+
   public static Resource unbounded() {
     return UNBOUNDED;
   }
@@ -300,8 +300,9 @@ public class Resources {
   }
 
   /**
-   * Subtract <code>rhs</code> from <code>lhs</code> and reset any negative
-   * values to zero.
+   * Subtract {@code rhs} from {@code lhs} and reset any negative values to
+   * zero. This call will modify {@code lhs}.
+   *
    * @param lhs {@link Resource} to subtract from
    * @param rhs {@link Resource} to subtract
    * @return the value of lhs after subtraction
@@ -317,6 +318,19 @@ public class Resources {
     return lhs;
   }
 
+  /**
+   * Subtract {@code rhs} from {@code lhs} and reset any negative values to
+   * zero. This call will operate on a copy of {@code lhs}, leaving {@code lhs}
+   * unmodified.
+   *
+   * @param lhs {@link Resource} to subtract from
+   * @param rhs {@link Resource} to subtract
+   * @return the value of lhs after subtraction
+   */
+  public static Resource subtractNonNegative(Resource lhs, Resource rhs) {
+    return subtractFromNonNegative(clone(lhs), rhs);
+  }
+
   public static Resource negate(Resource resource) {
     return subtract(NONE, resource);
   }
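
A minimal illustrative sketch (not part of this patch) of how the new subtractNonNegative helper differs from the existing subtractFromNonNegative it wraps. The Resource values below are arbitrary and the printed forms are only approximate.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class SubtractNonNegativeSketch {
  public static void main(String[] args) {
    Resource pending = Resource.newInstance(4096, 4);
    Resource unconfirmed = Resource.newInstance(6144, 2);

    // subtractNonNegative clones lhs first, so 'pending' is left untouched
    // and the negative memory result is clamped to zero.
    Resource remaining = Resources.subtractNonNegative(pending, unconfirmed);
    System.out.println(remaining);   // roughly <memory:0, vCores:2>
    System.out.println(pending);     // still <memory:4096, vCores:4>

    // subtractFromNonNegative mutates its first argument in place.
    Resources.subtractFromNonNegative(pending, unconfirmed);
    System.out.println(pending);     // now <memory:0, vCores:2>
  }
}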

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a6bb840/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 1225af1..dd6d38f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -1280,8 +1280,7 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
   }
   
   @Private
-  public boolean hasPendingResourceRequest(ResourceCalculator rc,
-      String nodePartition, Resource cluster,
+  public boolean hasPendingResourceRequest(String nodePartition,
       SchedulingMode schedulingMode) {
     // We need to consider unconfirmed allocations
     if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) {
@@ -1294,16 +1293,12 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
     // To avoid too many allocation-proposals rejected for non-default
     // partition allocation
     if (StringUtils.equals(nodePartition, RMNodeLabelsManager.NO_LABEL)) {
-      pending = Resources.subtract(pending, Resources
+      pending = Resources.subtractNonNegative(pending, Resources
           .createResource(unconfirmedAllocatedMem.get(),
               unconfirmedAllocatedVcores.get()));
     }
 
-    if (Resources.greaterThan(rc, cluster, pending, Resources.none())) {
-      return true;
-    }
-
-    return false;
+    return !Resources.isNone(pending);
   }
 
   /*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a6bb840/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
index 8f49b41..a843002 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
@@ -855,8 +855,8 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
 
     if (reservedContainer == null) {
       // Check if application needs more resource, skip if it doesn't need more.
-      if (!application.hasPendingResourceRequest(rc,
-          candidates.getPartition(), clusterResource, schedulingMode)) {
+      if (!application.hasPendingResourceRequest(candidates.getPartition(),
+          schedulingMode)) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Skip app_attempt=" + application.getApplicationAttemptId()
               + ", because it doesn't need more resource, schedulingMode="

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a6bb840/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index 54dd090..e48e04b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -257,7 +257,7 @@ public class AllocationConfiguration extends ReservationSchedulerConfiguration {
   }
 
   /**
-   * Get the maximum resource allocation for the given queue. If the max in not
+   * Get the maximum resource allocation for the given queue. If the max is not
    * set, return the default max.
    *
    * @param queue the target queue's name

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a6bb840/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 281aded..d9f3262 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -1094,8 +1094,7 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
             (!hasRequestForRack || appSchedulingInfo.canDelayTo(key,
                 node.getRackName()) || (hasRequestForNode)) &&
             // The requested container must be able to fit on the node:
-            Resources.lessThanOrEqual(RESOURCE_CALCULATOR, null,
-                resource,
+            Resources.fitsIn(resource,
                 node.getRMNode().getTotalCapability()))) {
       ret = false;
     } else if (!getQueue().fitsInMaxShare(resource)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a6bb840/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index e7da16f..cbc74d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -554,13 +554,14 @@ public class FSLeafQueue extends FSQueue {
    */
   private Resource minShareStarvation() {
     // If demand < minshare, we should use demand to determine starvation
-    Resource desiredShare = Resources.min(policy.getResourceCalculator(),
-        scheduler.getClusterResource(), getMinShare(), getDemand());
+    Resource starvation =
+        Resources.componentwiseMin(getMinShare(), getDemand());
 
-    Resource starvation = Resources.subtract(desiredShare, getResourceUsage());
-    boolean starved = !Resources.isNone(starvation);
+    Resources.subtractFromNonNegative(starvation, getResourceUsage());
 
+    boolean starved = !Resources.isNone(starvation);
     long now = scheduler.getClock().getTime();
+
     if (!starved) {
       // Record that the queue is not starved
       setLastTimeAtMinShare(now);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a6bb840/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
index 8179aa7..0b5c10b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
@@ -83,17 +83,18 @@ public class FairSharePolicy extends SchedulingPolicy {
   private static class FairShareComparator implements Comparator<Schedulable>,
       Serializable {
     private static final long serialVersionUID = 5564969375856699313L;
-    private static final Resource ONE = Resources.createResource(1);
 
     @Override
     public int compare(Schedulable s1, Schedulable s2) {
       int res = compareDemand(s1, s2);
 
-      // Pre-compute resource usages to avoid duplicate calculation
-      Resource resourceUsage1 = s1.getResourceUsage();
-      Resource resourceUsage2 = s2.getResourceUsage();
+      // Share resource usages to avoid duplicate calculation
+      Resource resourceUsage1 = null;
+      Resource resourceUsage2 = null;
 
       if (res == 0) {
+        resourceUsage1 = s1.getResourceUsage();
+        resourceUsage2 = s2.getResourceUsage();
         res = compareMinShareUsage(s1, s2, resourceUsage1, resourceUsage2);
       }
 
@@ -116,41 +117,44 @@ public class FairSharePolicy extends SchedulingPolicy {
 
     private int compareDemand(Schedulable s1, Schedulable s2) {
       int res = 0;
-      Resource demand1 = s1.getDemand();
-      Resource demand2 = s2.getDemand();
-      if (demand1.equals(Resources.none()) && Resources.greaterThan(
-          RESOURCE_CALCULATOR, null, demand2, Resources.none())) {
+      long demand1 = s1.getDemand().getMemorySize();
+      long demand2 = s2.getDemand().getMemorySize();
+
+      if ((demand1 == 0) && (demand2 > 0)) {
         res = 1;
-      } else if (demand2.equals(Resources.none()) && Resources.greaterThan(
-          RESOURCE_CALCULATOR, null, demand1, Resources.none())) {
+      } else if ((demand2 == 0) && (demand1 > 0)) {
         res = -1;
       }
+
       return res;
     }
 
     private int compareMinShareUsage(Schedulable s1, Schedulable s2,
         Resource resourceUsage1, Resource resourceUsage2) {
       int res;
-      Resource minShare1 = Resources.min(RESOURCE_CALCULATOR, null,
-          s1.getMinShare(), s1.getDemand());
-      Resource minShare2 = Resources.min(RESOURCE_CALCULATOR, null,
-          s2.getMinShare(), s2.getDemand());
-      boolean s1Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
-          resourceUsage1, minShare1);
-      boolean s2Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
-          resourceUsage2, minShare2);
+      long minShare1 = Math.min(s1.getMinShare().getMemorySize(),
+          s1.getDemand().getMemorySize());
+      long minShare2 = Math.min(s2.getMinShare().getMemorySize(),
+          s2.getDemand().getMemorySize());
+      boolean s1Needy = resourceUsage1.getMemorySize() < minShare1;
+      boolean s2Needy = resourceUsage2.getMemorySize() < minShare2;
 
       if (s1Needy && !s2Needy) {
         res = -1;
       } else if (s2Needy && !s1Needy) {
         res = 1;
       } else if (s1Needy && s2Needy) {
-        double minShareRatio1 = (double) resourceUsage1.getMemorySize() /
-            Resources.max(RESOURCE_CALCULATOR, null, minShare1, ONE)
-                .getMemorySize();
-        double minShareRatio2 = (double) resourceUsage2.getMemorySize() /
-            Resources.max(RESOURCE_CALCULATOR, null, minShare2, ONE)
-                .getMemorySize();
+        double minShareRatio1 = (double) resourceUsage1.getMemorySize();
+        double minShareRatio2 = (double) resourceUsage2.getMemorySize();
+
+        if (minShare1 > 1) {
+          minShareRatio1 /= minShare1;
+        }
+
+        if (minShare2 > 1) {
+          minShareRatio2 /= minShare2;
+        }
+
         res = (int) Math.signum(minShareRatio1 - minShareRatio2);
       } else {
         res = 0;
@@ -173,18 +177,16 @@ public class FairSharePolicy extends SchedulingPolicy {
       if (weight1 > 0.0 && weight2 > 0.0) {
         useToWeightRatio1 = resourceUsage1.getMemorySize() / weight1;
         useToWeightRatio2 = resourceUsage2.getMemorySize() / weight2;
-      } else { // Either weight1 or weight2 equals to 0
-        if (weight1 == weight2) {
-          // If they have same weight, just compare usage
-          useToWeightRatio1 = resourceUsage1.getMemorySize();
-          useToWeightRatio2 = resourceUsage2.getMemorySize();
-        } else {
-          // By setting useToWeightRatios to negative weights, we give the
-          // zero-weight one less priority, so the non-zero weight one will
-          // be given slots.
-          useToWeightRatio1 = -weight1;
-          useToWeightRatio2 = -weight2;
-        }
+      } else if (weight1 == weight2) { // Either weight1 or weight2 equals to 0
+        // If they have same weight, just compare usage
+        useToWeightRatio1 = resourceUsage1.getMemorySize();
+        useToWeightRatio2 = resourceUsage2.getMemorySize();
+      } else {
+        // By setting useToWeightRatios to negative weights, we give the
+        // zero-weight one less priority, so the non-zero weight one will
+        // be given slots.
+        useToWeightRatio1 = -weight1;
+        useToWeightRatio2 = -weight2;
       }
 
       return (int) Math.signum(useToWeightRatio1 - useToWeightRatio2);
@@ -226,7 +228,7 @@ public class FairSharePolicy extends SchedulingPolicy {
 
   @Override
   public boolean checkIfUsageOverFairShare(Resource usage, Resource fairShare) {
-    return Resources.greaterThan(RESOURCE_CALCULATOR, null, usage, fairShare);
+    return usage.getMemorySize() > fairShare.getMemorySize();
   }
 
   @Override
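
A standalone arithmetic sketch (not part of this patch) of what the reworked compareMinShareUsage now does with plain memory sizes instead of Resources/ResourceCalculator calls; the queue numbers below are made up.

public class MinShareRatioSketch {
  static int compareMinShareUsage(long usage1, long minShare1, long demand1,
                                  long usage2, long minShare2, long demand2) {
    long share1 = Math.min(minShare1, demand1);
    long share2 = Math.min(minShare2, demand2);
    boolean s1Needy = usage1 < share1;
    boolean s2Needy = usage2 < share2;
    if (s1Needy && !s2Needy) {
      return -1;
    }
    if (s2Needy && !s1Needy) {
      return 1;
    }
    if (s1Needy && s2Needy) {
      // Ratio of usage to (capped) min share; divide only when the share
      // is larger than one, mirroring the guard in the patch.
      double ratio1 = usage1;
      double ratio2 = usage2;
      if (share1 > 1) {
        ratio1 /= share1;
      }
      if (share2 > 1) {
        ratio2 /= share2;
      }
      return (int) Math.signum(ratio1 - ratio2);
    }
    return 0;
  }

  public static void main(String[] args) {
    // Queue A uses 1 GB of a 4 GB min share, queue B uses 3 GB of 4 GB:
    // A is further below its min share, so it sorts first (negative result).
    System.out.println(compareMinShareUsage(1024, 4096, 8192,
                                            3072, 4096, 8192)); // -1
  }
}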

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a6bb840/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
index c110b1c..b7a143b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import java.util.ArrayList;
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils.toSchedulerKey;
 import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.mock;
@@ -24,6 +25,7 @@ import static org.mockito.Mockito.when;
 
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
@@ -41,10 +43,13 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.junit.After;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import org.junit.Test;
 
 public class TestSchedulerApplicationAttempt {
@@ -335,4 +340,57 @@ public class TestSchedulerApplicationAttempt {
     assertEquals(Integer.MAX_VALUE,
         app.getSchedulingOpportunities(schedulerKey));
   }
+
+  @Test
+  public void testHasPendingResourceRequest() throws Exception {
+    ApplicationAttemptId attemptId = createAppAttemptId(0, 0);
+    Queue queue = createQueue("test", null);
+    RMContext rmContext = mock(RMContext.class);
+    when(rmContext.getEpoch()).thenReturn(3L);
+    SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(
+        attemptId, "user", queue, queue.getAbstractUsersManager(), rmContext);
+
+    Priority priority = Priority.newInstance(1);
+    List<ResourceRequest> requests = new ArrayList<>(2);
+    Resource unit = Resource.newInstance(1L, 1);
+
+    // Add a request for a container with a node label
+    requests.add(ResourceRequest.newInstance(priority, ResourceRequest.ANY,
+        unit, 1, false, "label1"));
+    // Add a request for a container without a node label
+    requests.add(ResourceRequest.newInstance(priority, ResourceRequest.ANY,
+        unit, 1, false, ""));
+
+    // Add unique allocation IDs so that the requests aren't considered
+    // duplicates
+    requests.get(0).setAllocationRequestId(0L);
+    requests.get(1).setAllocationRequestId(1L);
+    app.updateResourceRequests(requests);
+
+    assertTrue("Reported no pending resource requests for no label when "
+        + "resource requests for no label are pending (exclusive partitions)",
+        app.hasPendingResourceRequest("",
+            SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY));
+    assertTrue("Reported no pending resource requests for label with pending "
+        + "resource requests (exclusive partitions)",
+        app.hasPendingResourceRequest("label1",
+            SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY));
+    assertFalse("Reported pending resource requests for label with no pending "
+        + "resource requests (exclusive partitions)",
+        app.hasPendingResourceRequest("label2",
+            SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY));
+
+    assertTrue("Reported no pending resource requests for no label when "
+        + "resource requests for no label are pending (relaxed partitions)",
+        app.hasPendingResourceRequest("",
+            SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY));
+    assertTrue("Reported no pending resource requests for label with pending "
+        + "resource requests (relaxed partitions)",
+        app.hasPendingResourceRequest("label1",
+            SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY));
+    assertTrue("Reported no pending resource requests for label with no "
+        + "pending resource requests (relaxed partitions)",
+        app.hasPendingResourceRequest("label2",
+            SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a6bb840/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java
index 01eec73..b1fc2d0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java
@@ -28,11 +28,12 @@ import org.apache.hadoop.yarn.util.resource.Resources;
  */
 public class FakeSchedulable implements Schedulable {
   private Resource usage;
-  private Resource minShare;
-  private Resource maxShare;
-  private Resource fairShare;
+  private final Resource demand;
+  private final Resource minShare;
+  private final Resource maxShare;
   private float weights;
-  private Priority priority;
+  private final Priority priority;
+  private Resource fairShare;
   private long startTime;
   
   public FakeSchedulable() {
@@ -75,10 +76,11 @@ public class FakeSchedulable implements Schedulable {
     this.minShare = minShare;
     this.maxShare = maxShare;
     this.weights = weight;
-    setFairShare(fairShare);
     this.usage = usage;
+    this.demand = Resources.multiply(usage, 2.0);
     this.priority = Records.newRecord(Priority.class);
-    this.startTime = startTime;
+    setFairShare(fairShare);
+    start(startTime);
   }
   
   @Override
@@ -92,13 +94,13 @@ public class FakeSchedulable implements Schedulable {
   }
 
   @Override
-  public void setFairShare(Resource fairShare) {
+  public final void setFairShare(Resource fairShare) {
     this.fairShare = fairShare;
   }
 
   @Override
   public Resource getDemand() {
-    return null;
+    return demand;
   }
 
   @Override
@@ -147,4 +149,8 @@ public class FakeSchedulable implements Schedulable {
   public void setResourceUsage(Resource usage) {
     this.usage = usage;
   }
+
+  public final void start(long time) {
+    startTime = time;
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[35/50] hadoop git commit: YARN-8380. Support bind propagation options for mounts in docker runtime. Contributed by Billie Rinaldi

Posted by in...@apache.org.
YARN-8380. Support bind propagation options for mounts in docker runtime. Contributed by Billie Rinaldi
YARN-8380. Support bind propagation options for mounts in docker runtime. Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8688a0c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8688a0c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8688a0c7

Branch: refs/heads/HADOOP-15461
Commit: 8688a0c7f88f2adf1a7fce695e06f3dd1f745080
Parents: 17e2616
Author: Eric Yang <ey...@apache.org>
Authored: Mon Jul 23 20:12:04 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Mon Jul 23 20:12:04 2018 -0400

----------------------------------------------------------------------
 .../runtime/DockerLinuxContainerRuntime.java    |  37 ++-
 .../linux/runtime/docker/DockerRunCommand.java  |  18 +-
 .../container-executor/impl/utils/docker-util.c | 196 ++++++++------
 .../test/utils/test_docker_util.cc              | 133 +++++-----
 .../runtime/TestDockerContainerRuntime.java     | 259 +++++++++----------
 .../gpu/TestNvidiaDockerV1CommandPlugin.java    |   2 +-
 .../src/site/markdown/DockerContainers.md       |  13 +-
 7 files changed, 349 insertions(+), 309 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8688a0c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index c89d5fb..88e6c91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -154,9 +154,13 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  *     {@code YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS} allows users to specify
  +     additional volume mounts for the Docker container. The value of the
  *     environment variable should be a comma-separated list of mounts.
- *     All such mounts must be given as {@code source:dest:mode}, and the mode
+ *     All such mounts must be given as {@code source:dest[:mode]} and the mode
  *     must be "ro" (read-only) or "rw" (read-write) to specify the type of
- *     access being requested. The requested mounts will be validated by
+ *     access being requested. If neither is specified, read-write will be
+ *     assumed. The mode may include a bind propagation option. In that case,
+ *     the mode should either be of the form [option], rw+[option], or
+ *     ro+[option]. Valid bind propagation options are shared, rshared, slave,
+ *     rslave, private, and rprivate. The requested mounts will be validated by
  *     container-executor based on the values set in container-executor.cfg for
  *     {@code docker.allowed.ro-mounts} and {@code docker.allowed.rw-mounts}.
  *   </li>
@@ -189,7 +193,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   private static final Pattern hostnamePattern = Pattern.compile(
       HOSTNAME_PATTERN);
   private static final Pattern USER_MOUNT_PATTERN = Pattern.compile(
-      "(?<=^|,)([^:\\x00]+):([^:\\x00]+):([a-z]+)");
+      "(?<=^|,)([^:\\x00]+):([^:\\x00]+)" +
+          "(:(r[ow]|(r[ow][+])?(r?shared|r?slave|r?private)))?(?:,|$)");
   private static final int HOST_NAME_LENGTH = 64;
   private static final String DEFAULT_PROCFS = "/proc";
 
@@ -840,24 +845,30 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
                 + environment.get(ENV_DOCKER_CONTAINER_MOUNTS));
       }
       parsedMounts.reset();
+      long mountCount = 0;
       while (parsedMounts.find()) {
+        mountCount++;
         String src = parsedMounts.group(1);
         java.nio.file.Path srcPath = java.nio.file.Paths.get(src);
         if (!srcPath.isAbsolute()) {
           src = mountReadOnlyPath(src, localizedResources);
         }
         String dst = parsedMounts.group(2);
-        String mode = parsedMounts.group(3);
-        if (!mode.equals("ro") && !mode.equals("rw")) {
-          throw new ContainerExecutionException(
-              "Invalid mount mode requested for mount: "
-                  + parsedMounts.group());
-        }
-        if (mode.equals("ro")) {
-          runCommand.addReadOnlyMountLocation(src, dst);
-        } else {
-          runCommand.addReadWriteMountLocation(src, dst);
+        String mode = parsedMounts.group(4);
+        if (mode == null) {
+          mode = "rw";
+        } else if (!mode.startsWith("ro") && !mode.startsWith("rw")) {
+          mode = "rw+" + mode;
         }
+        runCommand.addMountLocation(src, dst, mode);
+      }
+      long commaCount = environment.get(ENV_DOCKER_CONTAINER_MOUNTS).chars()
+          .filter(c -> c == ',').count();
+      if (mountCount != commaCount + 1) {
+        // this means the matcher skipped an improperly formatted mount
+        throw new ContainerExecutionException(
+            "Unable to parse some mounts in user supplied mount list: "
+                + environment.get(ENV_DOCKER_CONTAINER_MOUNTS));
       }
     }
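
An illustrative sketch (not part of this patch) of how the reworked USER_MOUNT_PATTERN and the mount-count check behave on a sample value of YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS. The regex is copied from the diff above; the sample mount list is made up.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MountPatternSketch {
  private static final Pattern USER_MOUNT_PATTERN = Pattern.compile(
      "(?<=^|,)([^:\\x00]+):([^:\\x00]+)" +
          "(:(r[ow]|(r[ow][+])?(r?shared|r?slave|r?private)))?(?:,|$)");

  public static void main(String[] args) {
    String mounts =
        "/usr/bin/awk:/awk:rw+shared,/etc/passwd:/etc/passwd:ro,/var/log:/logs";
    Matcher m = USER_MOUNT_PATTERN.matcher(mounts);
    long mountCount = 0;
    while (m.find()) {
      mountCount++;
      String src = m.group(1);
      String dst = m.group(2);
      String mode = m.group(4);       // null when no mode was given
      if (mode == null) {
        mode = "rw";                  // default, as in the runtime
      } else if (!mode.startsWith("ro") && !mode.startsWith("rw")) {
        mode = "rw+" + mode;          // bare propagation option
      }
      System.out.println(src + " -> " + dst + " [" + mode + "]");
    }
    // The runtime rejects the whole list if the matcher skipped an entry.
    long commaCount = mounts.chars().filter(c -> c == ',').count();
    System.out.println("well-formed: " + (mountCount == commaCount + 1));
  }
}

Per the container-executor changes further down, a mode such as "rw+shared" is later rewritten to "rw,shared" before being passed to docker's -v flag.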
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8688a0c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
index af16178..48ada5a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
@@ -65,19 +65,15 @@ public class DockerRunCommand extends DockerCommand {
   }
 
   public DockerRunCommand addMountLocation(String sourcePath, String
-      destinationPath, boolean createSource) {
-    boolean sourceExists = new File(sourcePath).exists();
-    if (!sourceExists && !createSource) {
-      return this;
-    }
-    super.addCommandArguments("rw-mounts", sourcePath + ":" + destinationPath);
+      destinationPath, String mode) {
+    super.addCommandArguments("mounts", sourcePath + ":" +
+        destinationPath + ":" + mode);
     return this;
   }
 
   public DockerRunCommand addReadWriteMountLocation(String sourcePath, String
       destinationPath) {
-    super.addCommandArguments("rw-mounts", sourcePath + ":" + destinationPath);
-    return this;
+    return addMountLocation(sourcePath, destinationPath, "rw");
   }
 
   public DockerRunCommand addAllReadWriteMountLocations(List<String> paths) {
@@ -93,14 +89,12 @@ public class DockerRunCommand extends DockerCommand {
     if (!sourceExists && !createSource) {
       return this;
     }
-    super.addCommandArguments("ro-mounts", sourcePath + ":" + destinationPath);
-    return this;
+    return addReadOnlyMountLocation(sourcePath, destinationPath);
   }
 
   public DockerRunCommand addReadOnlyMountLocation(String sourcePath, String
       destinationPath) {
-    super.addCommandArguments("ro-mounts", sourcePath + ":" + destinationPath);
-    return this;
+    return addMountLocation(sourcePath, destinationPath, "ro");
   }
 
   public DockerRunCommand addAllReadOnlyMountLocations(List<String> paths) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8688a0c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
index 580cd37..899c46a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -1095,105 +1095,150 @@ static char* get_mount_source(const char *mount) {
   return strndup(mount, len);
 }
 
-static int add_mounts(const struct configuration *command_config, const struct configuration *conf, const char *key,
-                      const int ro, args *args) {
-  const char *ro_suffix = "";
+static char* get_mount_type(const char *mount) {
+  const char *tmp = strrchr(mount, ':');
+  if (tmp == NULL) {
+    fprintf(ERRORFILE, "Invalid docker mount '%s'\n", mount);
+    return NULL;
+  }
+  if (strlen(tmp) < 2) {
+    fprintf(ERRORFILE, "Invalid docker mount '%s'\n", mount);
+    return NULL;
+  }
+  char *mount_type = strdup(&tmp[1]);
+  if (strncmp("ro", mount_type, 2) != 0 &&
+      strncmp("rw", mount_type, 2) != 0) {
+    fprintf(ERRORFILE, "Invalid docker mount type '%s'\n", mount_type);
+    free(mount_type);
+    return NULL;
+  }
+  if (strlen(mount_type) > 2) {
+    if (strlen(mount_type) < 8 ||
+        (strcmp("shared", mount_type + 3) != 0 &&
+        strcmp("rshared", mount_type + 3) != 0 &&
+        strcmp("slave", mount_type + 3) != 0 &&
+        strcmp("rslave", mount_type + 3) != 0 &&
+        strcmp("private", mount_type + 3) != 0 &&
+        strcmp("rprivate", mount_type + 3) != 0)) {
+      fprintf(ERRORFILE, "Invalid docker mount type '%s'\n", mount_type);
+      free(mount_type);
+      return NULL;
+    }
+    mount_type[2] = ',';
+  }
+  return mount_type;
+}
+
+static int add_mounts(const struct configuration *command_config, const struct configuration *conf, args *args) {
   const char *tmp_path_buffer[2] = {NULL, NULL};
   char *mount_src = NULL;
+  char *mount_type = NULL;
   char **permitted_ro_mounts = get_configuration_values_delimiter("docker.allowed.ro-mounts",
                                                                   CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, conf, ",");
   char **permitted_rw_mounts = get_configuration_values_delimiter("docker.allowed.rw-mounts",
                                                                   CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, conf, ",");
-  char **values = get_configuration_values_delimiter(key, DOCKER_COMMAND_FILE_SECTION, command_config, ",");
+  char **values = get_configuration_values_delimiter("mounts", DOCKER_COMMAND_FILE_SECTION, command_config, ",");
   char *config_path = get_config_path("");
   const char *container_executor_cfg_path = normalize_mount(config_path, 0);
   free(config_path);
   int i = 0, permitted_rw = 0, permitted_ro = 0, ret = 0;
-  if (ro != 0) {
-    ro_suffix = ":ro";
+  if (values == NULL) {
+    goto free_and_exit;
   }
-  if (values != NULL) {
-    // Disable mount volumes if image is not trusted.
-    if (check_trusted_image(command_config, conf) != 0) {
-      fprintf(ERRORFILE, "Disable mount volume for untrusted image\n");
-      // YARN will implicitly bind node manager local directory to
-      // docker image.  This can create file system security holes,
-      // if docker container has binary to escalate privileges.
-      // For untrusted image, we drop mounting without reporting
-      // INVALID_DOCKER_MOUNT messages to allow running untrusted
-      // image in a sandbox.
-      ret = 0;
+  // Disable mount volumes if image is not trusted.
+  if (check_trusted_image(command_config, conf) != 0) {
+    fprintf(ERRORFILE, "Disable mount volume for untrusted image\n");
+    // YARN will implicitly bind node manager local directory to
+    // docker image.  This can create file system security holes,
+    // if docker container has binary to escalate privileges.
+    // For untrusted image, we drop mounting without reporting
+    // INVALID_DOCKER_MOUNT messages to allow running untrusted
+    // image in a sandbox.
+    ret = 0;
+    goto free_and_exit;
+  }
+  ret = normalize_mounts(permitted_ro_mounts, 1);
+  ret |= normalize_mounts(permitted_rw_mounts, 1);
+  if (ret != 0) {
+    fprintf(ERRORFILE, "Unable to find permitted docker mounts on disk\n");
+    ret = MOUNT_ACCESS_ERROR;
+    goto free_and_exit;
+  }
+  for (i = 0; values[i] != NULL; i++) {
+    mount_src = get_mount_source(values[i]);
+    if (mount_src == NULL) {
+      fprintf(ERRORFILE, "Invalid docker mount '%s'\n", values[i]);
+      ret = INVALID_DOCKER_MOUNT;
       goto free_and_exit;
     }
-    ret = normalize_mounts(permitted_ro_mounts, 1);
-    ret |= normalize_mounts(permitted_rw_mounts, 1);
-    if (ret != 0) {
-      fprintf(ERRORFILE, "Unable to find permitted docker mounts on disk\n");
-      ret = MOUNT_ACCESS_ERROR;
+    mount_type = get_mount_type(values[i]);
+    if (mount_type == NULL) {
+      fprintf(ERRORFILE, "Invalid docker mount '%s'\n", values[i]);
+      ret = INVALID_DOCKER_MOUNT;
       goto free_and_exit;
     }
-    for (i = 0; values[i] != NULL; i++) {
-      mount_src = get_mount_source(values[i]);
-      if (mount_src == NULL) {
-        fprintf(ERRORFILE, "Invalid docker mount '%s'\n", values[i]);
-        ret = INVALID_DOCKER_MOUNT;
-        goto free_and_exit;
-      }
-      permitted_rw = check_mount_permitted((const char **) permitted_rw_mounts, mount_src);
-      permitted_ro = check_mount_permitted((const char **) permitted_ro_mounts, mount_src);
-      if (permitted_ro == -1 || permitted_rw == -1) {
-        fprintf(ERRORFILE, "Invalid docker mount '%s', realpath=%s\n", values[i], mount_src);
-        ret = INVALID_DOCKER_MOUNT;
-        goto free_and_exit;
-      }
+    permitted_rw = check_mount_permitted((const char **) permitted_rw_mounts, mount_src);
+    permitted_ro = check_mount_permitted((const char **) permitted_ro_mounts, mount_src);
+    if (permitted_ro == -1 || permitted_rw == -1) {
+      fprintf(ERRORFILE, "Invalid docker mount '%s', realpath=%s\n", values[i], mount_src);
+      ret = INVALID_DOCKER_MOUNT;
+      goto free_and_exit;
+    }
+    if (strncmp("rw", mount_type, 2) == 0) {
       // rw mount
-      if (ro == 0) {
-        if (permitted_rw == 0) {
-          fprintf(ERRORFILE, "Invalid docker rw mount '%s', realpath=%s\n", values[i], mount_src);
+      if (permitted_rw == 0) {
+        fprintf(ERRORFILE, "Invalid docker rw mount '%s', realpath=%s\n", values[i], mount_src);
+        ret = INVALID_DOCKER_RW_MOUNT;
+        goto free_and_exit;
+      } else {
+        // determine if the user can modify the container-executor.cfg file
+        tmp_path_buffer[0] = normalize_mount(mount_src, 0);
+        // just re-use the function, flip the args to check if the container-executor path is in the requested
+        // mount point
+        ret = check_mount_permitted(tmp_path_buffer, container_executor_cfg_path);
+        free((void *) tmp_path_buffer[0]);
+        if (ret == 1) {
+          fprintf(ERRORFILE, "Attempting to mount a parent directory '%s' of container-executor.cfg as read-write\n",
+                  values[i]);
           ret = INVALID_DOCKER_RW_MOUNT;
           goto free_and_exit;
-        } else {
-          // determine if the user can modify the container-executor.cfg file
-          tmp_path_buffer[0] = normalize_mount(mount_src, 0);
-          // just re-use the function, flip the args to check if the container-executor path is in the requested
-          // mount point
-          ret = check_mount_permitted(tmp_path_buffer, container_executor_cfg_path);
-          free((void *) tmp_path_buffer[0]);
-          if (ret == 1) {
-            fprintf(ERRORFILE, "Attempting to mount a parent directory '%s' of container-executor.cfg as read-write\n",
-                    values[i]);
-            ret = INVALID_DOCKER_RW_MOUNT;
-            goto free_and_exit;
-          }
         }
       }
-      //ro mount
-      if (ro != 0 && permitted_ro == 0 && permitted_rw == 0) {
+    } else {
+      // ro mount
+      if (permitted_ro == 0 && permitted_rw == 0) {
         fprintf(ERRORFILE, "Invalid docker ro mount '%s', realpath=%s\n", values[i], mount_src);
         ret = INVALID_DOCKER_RO_MOUNT;
         goto free_and_exit;
       }
+    }
 
-      ret = add_to_args(args, "-v");
-      if (ret != 0) {
-        ret = BUFFER_TOO_SMALL;
-        goto free_and_exit;
-      }
+    if (strlen(mount_type) > 2) {
+      // overwrite separator between read mode and propagation option with ','
+      int mount_type_index = strlen(values[i]) - strlen(mount_type);
+      values[i][mount_type_index + 2] = ',';
+    }
 
-      char *tmp_buffer = make_string("%s%s", values[i], (char *) ro_suffix);
-      ret = add_to_args(args, tmp_buffer);
-      free(tmp_buffer);
-      if (ret != 0) {
-        ret = BUFFER_TOO_SMALL;
-        goto free_and_exit;
-      }
-      free(mount_src);
-      mount_src = NULL;
+    ret = add_to_args(args, "-v");
+    if (ret != 0) {
+      ret = BUFFER_TOO_SMALL;
+      goto free_and_exit;
+    }
+
+    ret = add_to_args(args, values[i]);
+    if (ret != 0) {
+      ret = BUFFER_TOO_SMALL;
+      goto free_and_exit;
     }
+    free(mount_src);
+    free(mount_type);
+    mount_src = NULL;
+    mount_type = NULL;
   }
 
 free_and_exit:
   free(mount_src);
+  free(mount_type);
   free_values(permitted_ro_mounts);
   free_values(permitted_rw_mounts);
   free_values(values);
@@ -1201,14 +1246,6 @@ free_and_exit:
   return ret;
 }
 
-static int add_ro_mounts(const struct configuration *command_config, const struct configuration *conf, args *args) {
-  return add_mounts(command_config, conf, "ro-mounts", 1, args);
-}
-
-static int  add_rw_mounts(const struct configuration *command_config, const struct configuration *conf, args *args) {
-  return add_mounts(command_config, conf, "rw-mounts", 0, args);
-}
-
 static int check_privileges(const char *user) {
   int ngroups = 0;
   gid_t *groups = NULL;
@@ -1427,12 +1464,7 @@ int get_docker_run_command(const char *command_file, const struct configuration
     goto free_and_exit;
   }
 
-  ret = add_ro_mounts(&command_config, conf, args);
-  if (ret != 0) {
-    goto free_and_exit;
-  }
-
-  ret = add_rw_mounts(&command_config, conf, args);
+  ret = add_mounts(&command_config, conf, args);
   if (ret != 0) {
     goto free_and_exit;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8688a0c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
index 007e737..7e18146 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
@@ -934,36 +934,42 @@ namespace ContainerExecutor {
   }
 
 
-  TEST_F(TestDockerUtil, test_add_rw_mounts) {
+  TEST_F(TestDockerUtil, test_add_mounts) {
     struct configuration container_cfg, cmd_cfg;
     struct args buff = ARGS_INITIAL_VALUE;
     int ret = 0;
     std::string container_executor_cfg_contents = "[docker]\n  docker.trusted.registries=hadoop\n  "
-                                                              "docker.allowed.rw-mounts=/opt,/var,/usr/bin/cut\n  "
+                                                              "docker.allowed.rw-mounts=/opt,/var,/usr/bin/cut,/usr/bin/awk\n  "
                                                               "docker.allowed.ro-mounts=/etc/passwd";
     std::vector<std::pair<std::string, std::string> > file_cmd_vec;
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  rw-mounts=/var:/var", "-v /var:/var"));
+        "[docker-command-execution]\n  docker-command=run\n image=hadoop/image\n  mounts=/var:/var:rw", "-v /var:/var:rw"));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n image=nothadoop/image\n  rw-mounts=/var:/var", ""));
+        "[docker-command-execution]\n  docker-command=run\n image=nothadoop/image\n  mounts=/var:/var:rw", ""));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  rw-mounts=/var/:/var/", "-v /var/:/var/"));
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/var/:/var/:rw", "-v /var/:/var/:rw"));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  rw-mounts=/usr/bin/cut:/usr/bin/cut",
-        "-v /usr/bin/cut:/usr/bin/cut"));
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/usr/bin/cut:/usr/bin/cut:rw",
+        "-v /usr/bin/cut:/usr/bin/cut:rw"));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n image=nothadoop/image\n  rw-mounts=/lib:/lib",
+        "[docker-command-execution]\n  docker-command=run\n image=nothadoop/image\n  mounts=/lib:/lib:rw",
         ""));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  rw-mounts=/opt:/mydisk1,/var/log/:/mydisk2",
-        "-v /opt:/mydisk1 -v /var/log/:/mydisk2"));
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/opt:/mydisk1:rw,/var/log/:/mydisk2:rw",
+        "-v /opt:/mydisk1:rw -v /var/log/:/mydisk2:rw"));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n image=nothadoop/image\n  rw-mounts=/opt:/mydisk1,/var/log/:/mydisk2",
+        "[docker-command-execution]\n  docker-command=run\n image=nothadoop/image\n  mounts=/opt:/mydisk1:rw,/var/log/:/mydisk2:rw",
         ""));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
         "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n", ""));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
         "[docker-command-execution]\n  docker-command=run\n image=nothadoop/image\n", ""));
+    file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
+        "[docker-command-execution]\n  docker-command=run\n image=hadoop/image\n mounts=/usr/bin/awk:/awk:rw+shared,/etc/passwd:/etc/passwd:ro",
+        "-v /usr/bin/awk:/awk:rw,shared -v /etc/passwd:/etc/passwd:ro"));
+    file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
+        "[docker-command-execution]\n  docker-command=run\n image=hadoop/image\n mounts=/var:/var:ro+rprivate,/etc/passwd:/etc/passwd:ro+rshared",
+        "-v /var:/var:ro,rprivate -v /etc/passwd:/etc/passwd:ro,rshared"));
     write_container_executor_cfg(container_executor_cfg_contents);
     ret = read_config(container_executor_cfg_file.c_str(), &container_cfg);
     if (ret != 0) {
@@ -984,7 +990,7 @@ namespace ContainerExecutor {
       if (ret != 0) {
         FAIL();
       }
-      ret = add_rw_mounts(&cmd_cfg, &container_cfg, &buff);
+      ret = add_mounts(&cmd_cfg, &container_cfg, &buff);
       char *actual = flatten(&buff);
       ASSERT_EQ(0, ret);
       ASSERT_STREQ(itr->second.c_str(), actual);
@@ -995,13 +1001,22 @@ namespace ContainerExecutor {
 
     std::vector<std::pair<std::string, int> > bad_file_cmds_vec;
     bad_file_cmds_vec.push_back(std::make_pair<std::string, int>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  rw-mounts=/lib:/lib",
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/lib:/lib:rw",
         static_cast<int>(INVALID_DOCKER_RW_MOUNT)));
     bad_file_cmds_vec.push_back(std::make_pair<std::string, int>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  rw-mounts=/usr/bin/:/usr/bin",
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/usr/bin/:/usr/bin:rw",
         static_cast<int>(INVALID_DOCKER_RW_MOUNT)));
     bad_file_cmds_vec.push_back(std::make_pair<std::string, int>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  rw-mounts=/blah:/blah",
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/blah:/blah:rw",
+        static_cast<int>(INVALID_DOCKER_MOUNT)));
+    bad_file_cmds_vec.push_back(std::make_pair<std::string, int>(
+        "[docker-command-execution]\n  docker-command=run\n image=hadoop/image\n mounts=/tmp:/tmp:shared",
+        static_cast<int>(INVALID_DOCKER_MOUNT)));
+    bad_file_cmds_vec.push_back(std::make_pair<std::string, int>(
+        "[docker-command-execution]\n  docker-command=run\n image=hadoop/image\n mounts=/lib:/lib",
+        static_cast<int>(INVALID_DOCKER_MOUNT)));
+    bad_file_cmds_vec.push_back(std::make_pair<std::string, int>(
+        "[docker-command-execution]\n  docker-command=run\n image=hadoop/image\n mounts=/lib:/lib:other",
         static_cast<int>(INVALID_DOCKER_MOUNT)));
 
     std::vector<std::pair<std::string, int> >::const_iterator itr2;
@@ -1012,7 +1027,7 @@ namespace ContainerExecutor {
       if (ret != 0) {
         FAIL();
       }
-      ret = add_rw_mounts(&cmd_cfg, &container_cfg, &buff);
+      ret = add_mounts(&cmd_cfg, &container_cfg, &buff);
       char *actual = flatten(&buff);
       ASSERT_EQ(itr2->second, ret);
       ASSERT_STREQ("", actual);
@@ -1024,14 +1039,14 @@ namespace ContainerExecutor {
     // verify that you can't mount any directory in the container-executor.cfg path
     char *ce_path = realpath("../etc/hadoop/container-executor.cfg", NULL);
     while (strlen(ce_path) != 0) {
-      std::string cmd_file_contents = "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  rw-mounts=";
-      cmd_file_contents.append(ce_path).append(":").append("/etc/hadoop");
+      std::string cmd_file_contents = "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=";
+      cmd_file_contents.append(ce_path).append(":").append("/etc/hadoop").append(":rw");
       write_command_file(cmd_file_contents);
       ret = read_config(docker_command_file.c_str(), &cmd_cfg);
       if (ret != 0) {
         FAIL();
       }
-      ret = add_rw_mounts(&cmd_cfg, &container_cfg, &buff);
+      ret = add_mounts(&cmd_cfg, &container_cfg, &buff);
       ASSERT_EQ(INVALID_DOCKER_RW_MOUNT, ret) << " for input " << cmd_file_contents;
       char *actual = flatten(&buff);
       ASSERT_STREQ("", actual);
@@ -1046,7 +1061,7 @@ namespace ContainerExecutor {
     free(ce_path);
     free_configuration(&container_cfg);
 
-    // For untrusted image, container add_rw_mounts will pass through
+    // For untrusted image, container add_mounts will pass through
     // without mounting or report error code.
     container_executor_cfg_contents = "[docker]\n";
     write_container_executor_cfg(container_executor_cfg_contents);
@@ -1054,7 +1069,7 @@ namespace ContainerExecutor {
     if (ret != 0) {
       FAIL();
     }
-    ret = add_rw_mounts(&cmd_cfg, &container_cfg, &buff);
+    ret = add_mounts(&cmd_cfg, &container_cfg, &buff);
     char *actual = flatten(&buff);
     ASSERT_EQ(0, ret);
     ASSERT_STREQ("", actual);
@@ -1073,26 +1088,26 @@ namespace ContainerExecutor {
                                                               "docker.allowed.ro-mounts=/etc/passwd,/etc/group";
     std::vector<std::pair<std::string, std::string> > file_cmd_vec;
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n image=nothadoop/image\n  ro-mounts=/var:/var", ""));
+        "[docker-command-execution]\n  docker-command=run\n image=nothadoop/image\n  mounts=/var:/var:ro", ""));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n image=nothadoop/image\n  ro-mounts=/etc:/etc", ""));
+        "[docker-command-execution]\n  docker-command=run\n image=nothadoop/image\n  mounts=/etc:/etc:ro", ""));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  ro-mounts=/var/:/var/", "-v /var/:/var/:ro"));
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/var/:/var/:ro", "-v /var/:/var/:ro"));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  ro-mounts=/home:/home", "-v /home:/home:ro"));
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/home:/home:ro", "-v /home:/home:ro"));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  ro-mounts=/home/:/home", "-v /home/:/home:ro"));
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/home/:/home:ro", "-v /home/:/home:ro"));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  ro-mounts=/usr/bin/cut:/usr/bin/cut",
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/usr/bin/cut:/usr/bin/cut:ro",
         "-v /usr/bin/cut:/usr/bin/cut:ro"));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  ro-mounts=/etc/group:/etc/group",
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/etc/group:/etc/group:ro",
         "-v /etc/group:/etc/group:ro"));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  ro-mounts=/etc/passwd:/etc/passwd",
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/etc/passwd:/etc/passwd:ro",
         "-v /etc/passwd:/etc/passwd:ro"));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  ro-mounts=/var/log:/mydisk1,/etc/passwd:/etc/passwd",
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/var/log:/mydisk1:ro,/etc/passwd:/etc/passwd:ro",
         "-v /var/log:/mydisk1:ro -v /etc/passwd:/etc/passwd:ro"));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
         "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n", ""));
@@ -1116,7 +1131,7 @@ namespace ContainerExecutor {
       if (ret != 0) {
         FAIL();
       }
-      ret = add_ro_mounts(&cmd_cfg, &container_cfg, &buff);
+      ret = add_mounts(&cmd_cfg, &container_cfg, &buff);
       char *actual = flatten(&buff);
       ASSERT_EQ(0, ret);
       ASSERT_STREQ(itr->second.c_str(), actual);
@@ -1127,10 +1142,10 @@ namespace ContainerExecutor {
 
     std::vector<std::pair<std::string, int> > bad_file_cmds_vec;
     bad_file_cmds_vec.push_back(std::make_pair<std::string, int>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  ro-mounts=/etc:/etc",
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/etc:/etc:ro",
         static_cast<int>(INVALID_DOCKER_RO_MOUNT)));
     bad_file_cmds_vec.push_back(std::make_pair<std::string, int>(
-        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  ro-mounts=/blah:/blah",
+        "[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/blah:/blah:ro",
         static_cast<int>(INVALID_DOCKER_MOUNT)));
 
     std::vector<std::pair<std::string, int> >::const_iterator itr2;
@@ -1141,7 +1156,7 @@ namespace ContainerExecutor {
       if (ret != 0) {
         FAIL();
       }
-      ret = add_ro_mounts(&cmd_cfg, &container_cfg, &buff);
+      ret = add_mounts(&cmd_cfg, &container_cfg, &buff);
       char *actual = flatten(&buff);
       ASSERT_EQ(itr2->second, ret);
       ASSERT_STREQ("", actual);
@@ -1157,12 +1172,12 @@ namespace ContainerExecutor {
     if (ret != 0) {
       FAIL();
     }
-    write_command_file("[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  ro-mounts=/home:/home");
+    write_command_file("[docker-command-execution]\n  docker-command=run\n  image=hadoop/image\n  mounts=/home:/home:ro");
     ret = read_config(docker_command_file.c_str(), &cmd_cfg);
     if (ret != 0) {
       FAIL();
     }
-    ret = add_ro_mounts(&cmd_cfg, &container_cfg, &buff);
+    ret = add_mounts(&cmd_cfg, &container_cfg, &buff);
     ASSERT_EQ(INVALID_DOCKER_RO_MOUNT, ret);
     ASSERT_EQ(0, buff.length);
     reset_args(&buff);
@@ -1203,18 +1218,18 @@ namespace ContainerExecutor {
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
         "[docker-command-execution]\n"
             "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n  image=hadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-            "  ro-mounts=/var/log:/var/log,/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+            "  mounts=/var/log:/var/log:ro,/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
             "  network=bridge\n  devices=/dev/test:/dev/test\n"
             "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
             "  launch-command=bash,test_script.sh,arg1,arg2",
         "run --name=container_e1_12312_11111_02_000001 --user=nobody -d --rm -v /var/log:/var/log:ro -v /var/lib:/lib:ro"
-            " -v /usr/bin/cut:/usr/bin/cut:ro -v /tmp:/tmp --cgroup-parent=ctr-cgroup --cap-drop=ALL --cap-add=CHOWN"
+            " -v /usr/bin/cut:/usr/bin/cut:ro -v /tmp:/tmp:rw --cgroup-parent=ctr-cgroup --cap-drop=ALL --cap-add=CHOWN"
             " --cap-add=SETUID --hostname=host-id --device=/dev/test:/dev/test hadoop/docker-image bash "
             "test_script.sh arg1 arg2"));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
         "[docker-command-execution]\n"
             "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n image=nothadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-            "  ro-mounts=/var/log:/var/log,/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+            "  mounts=/var/log:/var/log:ro,/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
             "  network=bridge\n"
             "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
             "  launch-command=bash,test_script.sh,arg1,arg2",
@@ -1225,18 +1240,18 @@ namespace ContainerExecutor {
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
         "[docker-command-execution]\n"
             "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n  image=hadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-            "  ro-mounts=/var/log:/var/log,/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+            "  mounts=/var/log:/var/log:ro,/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
             "  network=bridge\n  devices=/dev/test:/dev/test\n  net=bridge\n"
             "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
             "  launch-command=bash,test_script.sh,arg1,arg2",
         "run --name=container_e1_12312_11111_02_000001 --user=nobody -d --rm --net=bridge -v /var/log:/var/log:ro -v /var/lib:/lib:ro"
-            " -v /usr/bin/cut:/usr/bin/cut:ro -v /tmp:/tmp --cgroup-parent=ctr-cgroup --cap-drop=ALL --cap-add=CHOWN "
+            " -v /usr/bin/cut:/usr/bin/cut:ro -v /tmp:/tmp:rw --cgroup-parent=ctr-cgroup --cap-drop=ALL --cap-add=CHOWN "
             "--cap-add=SETUID --hostname=host-id --device=/dev/test:/dev/test hadoop/docker-image bash"
             " test_script.sh arg1 arg2"));
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
         "[docker-command-execution]\n"
             "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n image=nothadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-            "  ro-mounts=/var/log:/var/log,/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+            "  mounts=/var/log:/var/log:ro,/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
             "  network=bridge\n  net=bridge\n"
             "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
             "  launch-command=bash,test_script.sh,arg1,arg2",
@@ -1247,24 +1262,24 @@ namespace ContainerExecutor {
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
         "[docker-command-execution]\n"
             "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n  image=hadoop/docker-image\n  user=root\n  hostname=host-id\n"
-            "  ro-mounts=/var/log:/var/log,/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+            "  mounts=/var/log:/var/log:ro,/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
             "  network=bridge\n  devices=/dev/test:/dev/test\n  net=bridge\n  privileged=true\n"
             "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
             "  launch-command=bash,test_script.sh,arg1,arg2",
         "run --name=container_e1_12312_11111_02_000001 -d --rm --net=bridge -v /var/log:/var/log:ro -v /var/lib:/lib:ro"
-            " -v /usr/bin/cut:/usr/bin/cut:ro -v /tmp:/tmp --cgroup-parent=ctr-cgroup --privileged --cap-drop=ALL "
+            " -v /usr/bin/cut:/usr/bin/cut:ro -v /tmp:/tmp:rw --cgroup-parent=ctr-cgroup --privileged --cap-drop=ALL "
             "--cap-add=CHOWN --cap-add=SETUID --hostname=host-id --device=/dev/test:/dev/test hadoop/docker-image "
             "bash test_script.sh arg1 arg2"));
 
     file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
         "[docker-command-execution]\n"
             "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n  image=hadoop/docker-image\n  user=root\n  hostname=host-id\n"
-            "  ro-mounts=/var/log:/var/log,/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+            "  mounts=/var/log:/var/log:ro,/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
             "  network=bridge\n  devices=/dev/test:/dev/test\n  net=bridge\n  privileged=true\n"
             "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n  group-add=1000,1001\n"
             "  launch-command=bash,test_script.sh,arg1,arg2",
         "run --name=container_e1_12312_11111_02_000001 -d --rm --net=bridge -v /var/log:/var/log:ro -v /var/lib:/lib:ro"
-            " -v /usr/bin/cut:/usr/bin/cut:ro -v /tmp:/tmp --cgroup-parent=ctr-cgroup --privileged --cap-drop=ALL "
+            " -v /usr/bin/cut:/usr/bin/cut:ro -v /tmp:/tmp:rw --cgroup-parent=ctr-cgroup --privileged --cap-drop=ALL "
             "--cap-add=CHOWN --cap-add=SETUID --hostname=host-id "
             "--device=/dev/test:/dev/test hadoop/docker-image bash test_script.sh arg1 arg2"));
 
@@ -1292,7 +1307,7 @@ namespace ContainerExecutor {
     bad_file_cmd_vec.push_back(std::make_pair<std::string, int>(
         "[docker-command-execution]\n"
             "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n image=nothadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-            "  ro-mounts=/var/log:/var/log,/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+            "  mounts=/var/log:/var/log:ro,/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
             "  network=bridge\n  net=bridge\n  privileged=true\n"
             "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n  group-add=1000,1001\n"
             "  launch-command=bash,test_script.sh,arg1,arg2",
@@ -1302,7 +1317,7 @@ namespace ContainerExecutor {
     bad_file_cmd_vec.push_back(std::make_pair<std::string, int>(
         "[docker-command-execution]\n"
             "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n  image=hadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-            "  ro-mounts=/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/var/log:/var/log\n"
+            "  mounts=/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/var/log:/var/log:rw\n"
             "  network=bridge\n  devices=/dev/test:/dev/test\n"
             "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
             "  launch-command=bash,test_script.sh,arg1,arg2",
@@ -1312,7 +1327,7 @@ namespace ContainerExecutor {
     bad_file_cmd_vec.push_back(std::make_pair<std::string, int>(
         "[docker-command-execution]\n"
             "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n  image=hadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-            "  ro-mounts=/bin:/bin,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+            "  mounts=/bin:/bin:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
             "  network=bridge\n  devices=/dev/test:/dev/test\n"
             "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
             "  launch-command=bash,test_script.sh,arg1,arg2",
@@ -1322,7 +1337,7 @@ namespace ContainerExecutor {
     bad_file_cmd_vec.push_back(std::make_pair<std::string, int>(
         "[docker-command-execution]\n"
             "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n  image=hadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-            "  ro-mounts=/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+            "  mounts=/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
             "  network=bridge\n  devices=/dev/test:/dev/test\n"
             "  cap-add=CHOWN,SETUID,SETGID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
             "  launch-command=bash,test_script.sh,arg1,arg2",
@@ -1332,7 +1347,7 @@ namespace ContainerExecutor {
     bad_file_cmd_vec.push_back(std::make_pair<std::string, int>(
         "[docker-command-execution]\n"
             "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n  image=hadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-            "  ro-mounts=/var/log:/var/log,/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+            "  mounts=/var/log:/var/log:ro,/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
             "  network=bridge\n  devices=/dev/dev1:/dev/dev1\n  privileged=true\n"
             "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
             "  launch-command=bash,test_script.sh,arg1,arg2",
@@ -1342,7 +1357,7 @@ namespace ContainerExecutor {
     bad_file_cmd_vec.push_back(std::make_pair<std::string, int>(
         "[docker-command-execution]\n"
             "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n  image=hadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-            "  ro-mounts=/var/log:/var/log,/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+            "  mounts=/var/log:/var/log:ro,/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
             "  network=bridge\n  devices=/dev/test:/dev/test\n  privileged=true\n  net=host\n"
             "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
             "  launch-command=bash,test_script.sh,arg1,arg2",
@@ -1432,18 +1447,18 @@ namespace ContainerExecutor {
       file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
           "[docker-command-execution]\n"
               "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n  image=hadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-              "  ro-mounts=/var/log:/var/log,/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+              "  mounts=/var/log:/var/log:ro,/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
               "  network=bridge\n  devices=/dev/test:/dev/test\n"
               "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
               "  launch-command=bash,test_script.sh,arg1,arg2",
           "run --name=container_e1_12312_11111_02_000001 --user=nobody -d --rm -v /var/log:/var/log:ro -v /var/lib:/lib:ro"
-              " -v /usr/bin/cut:/usr/bin/cut:ro -v /tmp:/tmp --cgroup-parent=ctr-cgroup --cap-drop=ALL --cap-add=CHOWN"
+              " -v /usr/bin/cut:/usr/bin/cut:ro -v /tmp:/tmp:rw --cgroup-parent=ctr-cgroup --cap-drop=ALL --cap-add=CHOWN"
               " --cap-add=SETUID --hostname=host-id --device=/dev/test:/dev/test hadoop/docker-image bash "
               "test_script.sh arg1 arg2"));
       file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
           "[docker-command-execution]\n"
               "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n image=nothadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-              "  ro-mounts=/var/log:/var/log,/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+              "  mounts=/var/log:/var/log:ro,/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
               "  network=bridge\n"
               "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
               "  launch-command=bash,test_script.sh,arg1,arg2",
@@ -1453,18 +1468,18 @@ namespace ContainerExecutor {
       file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
           "[docker-command-execution]\n"
               "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n  image=hadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-              "  ro-mounts=/var/log:/var/log,/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+              "  mounts=/var/log:/var/log:ro,/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
               "  network=bridge\n  devices=/dev/test:/dev/test\n  net=bridge\n"
               "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
               "  launch-command=bash,test_script.sh,arg1,arg2",
           "run --name=container_e1_12312_11111_02_000001 --user=nobody -d --rm --net=bridge -v /var/log:/var/log:ro -v /var/lib:/lib:ro"
-              " -v /usr/bin/cut:/usr/bin/cut:ro -v /tmp:/tmp --cgroup-parent=ctr-cgroup --cap-drop=ALL --cap-add=CHOWN "
+              " -v /usr/bin/cut:/usr/bin/cut:ro -v /tmp:/tmp:rw --cgroup-parent=ctr-cgroup --cap-drop=ALL --cap-add=CHOWN "
               "--cap-add=SETUID --hostname=host-id --device=/dev/test:/dev/test hadoop/docker-image bash"
               " test_script.sh arg1 arg2"));
       file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
           "[docker-command-execution]\n"
               "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n image=nothadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-              "  ro-mounts=/var/log:/var/log,/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+              "  mounts=/var/log:/var/log:ro,/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
               "  network=bridge\n  net=bridge\n"
               "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
               "  launch-command=bash,test_script.sh,arg1,arg2",
@@ -1475,7 +1490,7 @@ namespace ContainerExecutor {
       bad_file_cmd_vec.push_back(std::make_pair<std::string, int>(
           "[docker-command-execution]\n"
               "  docker-command=run\n  name=container_e1_12312_11111_02_000001\n  image=hadoop/docker-image\n  user=nobody\n  hostname=host-id\n"
-              "  ro-mounts=/var/log:/var/log,/var/lib:/lib,/usr/bin/cut:/usr/bin/cut\n  rw-mounts=/tmp:/tmp\n"
+              "  mounts=/var/log:/var/log:ro,/var/lib:/lib:ro,/usr/bin/cut:/usr/bin/cut:ro,/tmp:/tmp:rw\n"
               "  network=bridge\n  devices=/dev/test:/dev/test\n  net=bridge\n  privileged=true\n"
               "  cap-add=CHOWN,SETUID\n  cgroup-parent=ctr-cgroup\n  detach=true\n  rm=true\n"
               "  launch-command=bash,test_script.sh,arg1,arg2",

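For orientation while reading the expected strings above (for example "-v /var/log:/mydisk1:ro -v /etc/passwd:/etc/passwd:ro"): each entry of the consolidated mounts= key keeps its :ro/:rw suffix all the way onto the docker run command line. The Java sketch below only illustrates that validate-and-flatten idea; the class and method names are invented for illustration, and the real logic is the C add_mounts code exercised by these tests.

import java.util.ArrayList;
import java.util.List;

// Illustrative sketch only: turn "src:dst:mode" mount specs into docker -v args.
public final class MountFlattenSketch {
  static List<String> toDockerVolumeArgs(List<String> mounts) {
    List<String> args = new ArrayList<>();
    for (String mount : mounts) {
      String[] parts = mount.split(":");
      if (parts.length != 3) {
        throw new IllegalArgumentException("Malformed mount: " + mount);
      }
      String mode = parts[2];
      // Only ro/rw modes (optionally carrying a bind propagation suffix) pass.
      if (!mode.startsWith("ro") && !mode.startsWith("rw")) {
        throw new IllegalArgumentException("Invalid mount mode: " + mode);
      }
      args.add("-v");
      args.add(parts[0] + ":" + parts[1] + ":" + mode);
    }
    return args;
  }

  public static void main(String[] args) {
    // Mirrors one expected string from the tests above.
    System.out.println(toDockerVolumeArgs(
        List.of("/var/log:/mydisk1:ro", "/etc/passwd:/etc/passwd:ro")));
  }
}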
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8688a0c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index 855ec44..80823d3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -394,7 +394,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
             (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 13;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -411,17 +411,16 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
+    Assert.assertEquals("  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
-        + "/test_user_filecache_dir:/test_user_filecache_dir",
-        dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-            + "/test_application_local_dir:/test_application_local_dir",
-        dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter));
@@ -445,7 +444,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(
         Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    Assert.assertEquals(14, dockerCommands.size());
+    Assert.assertEquals(13, dockerCommands.size());
     int counter = 0;
     Assert.assertEquals("[docker-command-execution]",
         dockerCommands.get(counter++));
@@ -461,18 +460,17 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
+    Assert.assertEquals("  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
     Assert
         .assertEquals("  net=host", dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
-            + "/test_user_filecache_dir:/test_user_filecache_dir",
-        dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-            + "/test_application_local_dir:/test_application_local_dir",
-        dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter));
@@ -560,7 +558,7 @@ public class TestDockerContainerRuntime {
     //This is the expected docker invocation for this case
     List<String> dockerCommands = Files
         .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
-    int expected = 15;
+    int expected = 14;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -579,18 +577,17 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
+    Assert.assertEquals("  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
     Assert
         .assertEquals("  net=" + allowedNetwork, dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
-            + "/test_user_filecache_dir:/test_user_filecache_dir",
-        dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-            + "/test_application_local_dir:/test_application_local_dir",
-        dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter));
@@ -620,7 +617,7 @@ public class TestDockerContainerRuntime {
     //This is the expected docker invocation for this case
     List<String> dockerCommands = Files
         .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
-    int expected = 15;
+    int expected = 14;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -639,18 +636,17 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
+    Assert.assertEquals("  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
     Assert
         .assertEquals("  net=host", dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
-            + "/test_user_filecache_dir:/test_user_filecache_dir",
-        dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-            + "/test_application_local_dir:/test_application_local_dir",
-        dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter));
@@ -690,7 +686,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files
         .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 15;
+    int expected = 14;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -711,16 +707,16 @@ public class TestDockerContainerRuntime {
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  name=container_e11_1518975676334_14532816_01_000001",
-        dockerCommands.get(counter++));
-    Assert.assertEquals("  net=sdn1", dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
-            + "/test_user_filecache_dir:/test_user_filecache_dir",
+        "  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro",
         dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-            + "/test_application_local_dir:/test_application_local_dir",
+        "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
+    Assert.assertEquals("  net=sdn1", dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter));
@@ -759,18 +755,16 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
-
+    Assert.assertEquals("  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
     Assert.assertEquals("  net=sdn2", dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
-            + "/test_user_filecache_dir:/test_user_filecache_dir",
-        dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-            + "/test_application_local_dir:/test_application_local_dir",
-        dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter));
@@ -808,7 +802,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 13;
     Assert.assertEquals(expected, dockerCommands.size());
 
     String command = dockerCommands.get(0);
@@ -859,7 +853,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(
         Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 15;
+    int expected = 14;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -876,18 +870,17 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
+    Assert.assertEquals("  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
     Assert.assertEquals("  pid=host", dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
-            + "/test_user_filecache_dir:/test_user_filecache_dir",
-        dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-            + "/test_application_local_dir:/test_application_local_dir",
-        dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter));
@@ -912,7 +905,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(
         Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 13;
     Assert.assertEquals(expected, dockerCommands.size());
 
     String command = dockerCommands.get(0);
@@ -1021,7 +1014,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 13;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -1036,18 +1029,17 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
+    Assert.assertEquals("  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
     Assert.assertEquals("  privileged=true", dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
-            + "/test_user_filecache_dir:/test_user_filecache_dir",
-        dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-            + "/test_application_local_dir:/test_application_local_dir",
-        dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + submittingUser,
         dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
@@ -1108,7 +1100,7 @@ public class TestDockerContainerRuntime {
 
     env.put(
         DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_MOUNTS,
-        "source");
+        "/source");
 
     try {
       runtime.launchContainer(builder.build());
@@ -1138,7 +1130,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 13;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -1155,19 +1147,17 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
+    Assert.assertEquals("  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro,"
+            + "/test_local_dir/test_resource_file:test_mount:ro",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  ro-mounts=/test_filecache_dir:/test_filecache_dir,/"
-            + "test_user_filecache_dir:/test_user_filecache_dir,"
-            + "/test_local_dir/test_resource_file:test_mount",
-        dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-            + "/test_application_local_dir:/test_application_local_dir",
-        dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter));
@@ -1194,7 +1184,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 13;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -1211,20 +1201,18 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
+    Assert.assertEquals("  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro,"
+            + "/test_local_dir/test_resource_file:test_mount1:ro,"
+            + "/test_local_dir/test_resource_file:test_mount2:ro",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
-            + "/test_user_filecache_dir:/test_user_filecache_dir,"
-            + "/test_local_dir/test_resource_file:test_mount1,"
-            + "/test_local_dir/test_resource_file:test_mount2",
-        dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-            + "/test_application_local_dir:/test_application_local_dir",
-        dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter));
@@ -1240,7 +1228,8 @@ public class TestDockerContainerRuntime {
 
     env.put(
         DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_MOUNTS,
-        "/tmp/foo:/tmp/foo:ro,/tmp/bar:/tmp/bar:rw");
+        "/tmp/foo:/tmp/foo:ro,/tmp/bar:/tmp/bar:rw,/tmp/baz:/tmp/baz," +
+            "/a:/a:shared,/b:/b:ro+shared,/c:/c:rw+rshared,/d:/d:private");
 
     runtime.launchContainer(builder.build());
     PrivilegedOperation op = capturePrivilegedOperationAndVerifyArgs();
@@ -1250,7 +1239,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(
         Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 13;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -1267,19 +1256,19 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
+    Assert.assertEquals("  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro,"
+            + "/tmp/foo:/tmp/foo:ro,"
+            + "/tmp/bar:/tmp/bar:rw,/tmp/baz:/tmp/baz:rw,/a:/a:rw+shared,"
+            + "/b:/b:ro+shared,/c:/c:rw+rshared,/d:/d:rw+private",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
-            + "/test_user_filecache_dir:/test_user_filecache_dir,"
-            + "/tmp/foo:/tmp/foo",
-        dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-            + "/test_application_local_dir:/test_application_local_dir,"
-            + "/tmp/bar:/tmp/bar",
-        dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter));
@@ -1293,7 +1282,7 @@ public class TestDockerContainerRuntime {
 
     env.put(
         DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_MOUNTS,
-        "source:target");
+        "/source:target:ro,/source:target:other,/source:target:rw");
 
     try {
       runtime.launchContainer(builder.build());
@@ -1311,7 +1300,7 @@ public class TestDockerContainerRuntime {
 
     env.put(
         DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_MOUNTS,
-        "source:target:other");
+        "/source:target:other");
 
     try {
       runtime.launchContainer(builder.build());
@@ -1329,7 +1318,7 @@ public class TestDockerContainerRuntime {
 
     env.put(
         DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_MOUNTS,
-        "s\0ource:target:ro");
+        "/s\0ource:target:ro");
 
     try {
       runtime.launchContainer(builder.build());
@@ -1357,7 +1346,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(
         Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 13;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -1374,18 +1363,17 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
+    Assert.assertEquals("  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro,"
+            + "/tmp/foo:/tmp/foo:ro,/tmp/bar:/tmp/bar:ro",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
-        + "/test_user_filecache_dir:/test_user_filecache_dir,"
-        + "/tmp/foo:/tmp/foo,/tmp/bar:/tmp/bar",
-        dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-        + "/test_application_local_dir:/test_application_local_dir",
-        dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter));
@@ -1425,7 +1413,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(
         Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 13;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -1442,18 +1430,17 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
+    Assert.assertEquals("  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro,"
+            + "/tmp/foo:/tmp/foo:rw,/tmp/bar:/tmp/bar:rw",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
-        + "/test_user_filecache_dir:/test_user_filecache_dir",
-        dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-        + "/test_application_local_dir:/test_application_local_dir,"
-        + "/tmp/foo:/tmp/foo,/tmp/bar:/tmp/bar",
-        dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter));
@@ -2012,7 +1999,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 15;
+    int expected = 14;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -2029,18 +2016,17 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
+    Assert.assertEquals("  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro,"
+            + "/source/path:/destination/path:ro",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
-            + "/test_user_filecache_dir:/test_user_filecache_dir,"
-            + "/source/path:/destination/path",
-        dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-            + "/test_application_local_dir:/test_application_local_dir",
-        dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
 
     // Verify volume-driver is set to expected value.
@@ -2153,7 +2139,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files
         .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 15;
+    int expected = 14;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -2172,17 +2158,16 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
         dockerCommands.get(counter++));
+    Assert.assertEquals("  mounts="
+            + "/test_container_log_dir:/test_container_log_dir:rw,"
+            + "/test_application_local_dir:/test_application_local_dir:rw,"
+            + "/test_filecache_dir:/test_filecache_dir:ro,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir:ro",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
-            + "/test_user_filecache_dir:/test_user_filecache_dir",
-        dockerCommands.get(counter++));
-    Assert.assertEquals(
-        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
-            + "/test_application_local_dir:/test_application_local_dir",
-        dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
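The assertions above encode the normalization applied to user-supplied mounts: a spec without a mode defaults to rw, a bare bind-propagation option is promoted to rw+option, and any other mode string is rejected (see the "/source:target:other" failure case). A minimal Java sketch of those rules follows; the names are made up and this is not the DockerLinuxContainerRuntime implementation, just an illustration of the behaviour the tests assert.

import java.util.Set;

// Illustrative sketch of the mount-spec normalization asserted above.
public final class MountSpecSketch {
  private static final Set<String> PROPAGATION =
      Set.of("shared", "rshared", "slave", "rslave", "private", "rprivate");

  static String normalizeMount(String mount) {
    String[] parts = mount.split(":", 3);
    if (parts.length < 2 || !parts[0].startsWith("/")) {
      throw new IllegalArgumentException("Invalid mount spec: " + mount);
    }
    String mode = (parts.length == 3) ? parts[2] : "rw";   // no mode -> rw
    if (PROPAGATION.contains(mode)) {
      mode = "rw+" + mode;                                  // bare option -> rw+option
    } else if (!mode.equals("ro") && !mode.equals("rw")
        && !mode.startsWith("ro+") && !mode.startsWith("rw+")) {
      throw new IllegalArgumentException("Invalid mount mode: " + mode);
    }
    return parts[0] + ":" + parts[1] + ":" + mode;
  }

  public static void main(String[] args) {
    System.out.println(normalizeMount("/tmp/baz:/tmp/baz"));   // /tmp/baz:/tmp/baz:rw
    System.out.println(normalizeMount("/a:/a:shared"));        // /a:/a:rw+shared
    System.out.println(normalizeMount("/b:/b:ro+shared"));     // /b:/b:ro+shared
  }
}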

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8688a0c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV1CommandPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV1CommandPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV1CommandPlugin.java
index 7057847..c8b2eaf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV1CommandPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV1CommandPlugin.java
@@ -196,7 +196,7 @@ public class TestNvidiaDockerV1CommandPlugin {
     // Volume driver should not be included by final commandline
     Assert.assertFalse(newCommandLine.containsKey("volume-driver"));
     Assert.assertTrue(newCommandLine.containsKey("devices"));
-    Assert.assertTrue(newCommandLine.containsKey("ro-mounts"));
+    Assert.assertTrue(newCommandLine.containsKey("mounts"));
 
     /* Test get docker volume command */
     commandPlugin = new MyNvidiaDockerV1CommandPlugin(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8688a0c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index a2ef6fe..e35c906 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -304,7 +304,7 @@ environment variables in the application's environment:
 | `YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK` | Sets the network type to be used by the Docker container. It must be a valid value as determined by the yarn.nodemanager.runtime.linux.docker.allowed-container-networks property. |
 | `YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_PID_NAMESPACE` | Controls which PID namespace will be used by the Docker container. By default, each Docker container has its own PID namespace. To share the namespace of the host, the yarn.nodemanager.runtime.linux.docker.host-pid-namespace.allowed property must be set to true. If the host PID namespace is allowed and this environment variable is set to host, the Docker container will share the host's PID namespace. No other value is allowed. |
 | `YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER` | Controls whether the Docker container is a privileged container. In order to use privileged containers, the yarn.nodemanager.runtime.linux.docker.privileged-containers.allowed property must be set to true, and the application owner must appear in the value of the yarn.nodemanager.runtime.linux.docker.privileged-containers.acl property. If this environment variable is set to true, a privileged Docker container will be used if allowed. No other value is allowed, so the environment variable should be left unset rather than setting it to false. |
-| `YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS` | Adds additional volume mounts to the Docker container. The value of the environment variable should be a comma-separated list of mounts. All such mounts must be given as "source:dest:mode" and the mode must be "ro" (read-only) or "rw" (read-write) to specify the type of access being requested. The requested mounts will be validated by container-executor based on the values set in container-executor.cfg for docker.allowed.ro-mounts and docker.allowed.rw-mounts. |
+| `YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS` | Adds additional volume mounts to the Docker container. The value of the environment variable should be a comma-separated list of mounts. All such mounts must be given as `source:dest[:mode]` and the mode must be "ro" (read-only) or "rw" (read-write) to specify the type of access being requested. If neither is specified, read-write will be assumed. The mode may include a bind propagation option. In that case, the mode should be of the form `[option]`, `rw+[option]`, or `ro+[option]`. Valid bind propagation options are shared, rshared, slave, rslave, private, and rprivate. The requested mounts will be validated by container-executor based on the values set in container-executor.cfg for `docker.allowed.ro-mounts` and `docker.allowed.rw-mounts`. |
 | `YARN_CONTAINER_RUNTIME_DOCKER_DELAYED_REMOVAL` | Allows a user to request delayed deletion of the Docker container on a per container basis. If true, Docker containers will not be removed until the duration defined by yarn.nodemanager.delete.debug-delay-sec has elapsed. Administrators can disable this feature through the yarn-site property yarn.nodemanager.runtime.linux.docker.delayed-removal.allowed. This feature is disabled by default. When this feature is disabled or set to false, the container will be removed as soon as it exits. |
 
 The first two are required. The remainder can be set as needed. While
@@ -347,10 +347,13 @@ supplied by the user must either match or be a child of the specified
 directory.
 
 The user supplied mount list is defined as a comma separated list in the form
-*source*:*destination*:*mode*. The source is the file or directory on the host.
-The destination is the path within the contatiner where the source will be bind
-mounted. The mode defines the mode the user expects for the mount, which can be
-ro (read-only) or rw (read-write).
+*source*:*destination* or *source*:*destination*:*mode*. The source is the file
+or directory on the host. The destination is the path within the container
+where the source will be bind mounted. The mode defines the mode the user
+expects for the mount, which can be ro (read-only) or rw (read-write). If not
+specified, rw is assumed. The mode may also include a bind propagation option
+ (shared, rshared, slave, rslave, private, or rprivate). In that case, the
+ mode should be of the form *option*, rw+*option*, or ro+*option*.
 
 The following example outlines how to use this feature to mount the commonly
 needed /sys/fs/cgroup directory into the container running on YARN.
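
As a rough illustration of the mount syntax described above (the second mount path below is hypothetical and not taken from the patch), a client submitting a YARN application could set the mounts variable in its container environment roughly like this:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;

    // Sketch only: one read-only mount plus one read-write mount that also
    // requests the "rshared" bind propagation option.
    Map<String, String> env = new HashMap<>();
    env.put("YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS",
        "/sys/fs/cgroup:/sys/fs/cgroup:ro,/var/lib/app-cache:/cache:rw+rshared");
    ContainerLaunchContext clc = ContainerLaunchContext.newInstance(
        null, env, null, null, null, null);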




[02/50] hadoop git commit: HDDS-241. Handle Volume in inconsistent state. Contributed by Hanisha Koneru.

Posted by in...@apache.org.
HDDS-241. Handle Volume in inconsistent state. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5d44473
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5d44473
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5d44473

Branch: refs/heads/HADOOP-15461
Commit: d5d444732bf5c3f3cfc681f8d87e0681a7471f2f
Parents: 1af87df
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed Jul 18 09:38:43 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Wed Jul 18 09:38:43 2018 -0700

----------------------------------------------------------------------
 .../container/common/volume/HddsVolume.java     | 45 +++++++++--
 .../container/common/volume/VolumeSet.java      | 14 +++-
 .../container/common/volume/TestVolumeSet.java  | 78 +++++++++++++++++---
 .../container/ozoneimpl/TestOzoneContainer.java | 18 ++++-
 4 files changed, 129 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5d44473/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 1e71494..6468720 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -42,6 +42,18 @@ import java.util.Properties;
  * HddsVolume represents a volume in a datanode. {@link VolumeSet} maintains a
  * list of HddsVolumes, one for each volume in the Datanode.
  * {@link VolumeInfo} is encompassed by this class.
+ *
+ * The disk layout per volume is as follows:
+ * ../hdds/VERSION
+ * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID>>/metadata
+ * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID>>/<<dataDir>>
+ *
+ * Each hdds volume has its own VERSION file. The hdds volume will have one
+ * scmUuid directory for each SCM it is a part of (currently only one SCM is
+ * supported).
+ *
+ * During DN startup, if the VERSION file exists, we verify that the
+ * clusterID in the version file matches the clusterID from SCM.
  */
 public final class HddsVolume {
 
@@ -108,11 +120,6 @@ public final class HddsVolume {
   }
 
   private HddsVolume(Builder b) throws IOException {
-    Preconditions.checkNotNull(b.volumeRootStr,
-        "Volume root dir cannot be null");
-    Preconditions.checkNotNull(b.datanodeUuid, "DatanodeUUID cannot be null");
-    Preconditions.checkNotNull(b.conf, "Configuration cannot be null");
-
     StorageLocation location = StorageLocation.parse(b.volumeRootStr);
     hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR);
     this.state = VolumeState.NOT_INITIALIZED;
@@ -162,6 +169,10 @@ public final class HddsVolume {
       readVersionFile();
       setState(VolumeState.NORMAL);
       break;
+    case INCONSISTENT:
+      // Volume Root is in an inconsistent state. Skip loading this volume.
+      throw new IOException("Volume is in an " + VolumeState.INCONSISTENT +
+          " state. Skipped loading volume: " + hddsRootDir.getPath());
     default:
       throw new IOException("Unrecognized initial state : " +
           intialVolumeState + "of volume : " + hddsRootDir);
@@ -170,11 +181,23 @@ public final class HddsVolume {
 
   private VolumeState analyzeVolumeState() {
     if (!hddsRootDir.exists()) {
+      // Volume Root does not exist.
       return VolumeState.NON_EXISTENT;
     }
-    if (!getVersionFile().exists()) {
+    if (!hddsRootDir.isDirectory()) {
+      // Volume Root exists but is not a directory.
+      return VolumeState.INCONSISTENT;
+    }
+    File[] files = hddsRootDir.listFiles();
+    if (files == null || files.length == 0) {
+      // Volume Root exists and is empty.
       return VolumeState.NOT_FORMATTED;
     }
+    if (!getVersionFile().exists()) {
+      // Volume Root is non empty but VERSION file does not exist.
+      return VolumeState.INCONSISTENT;
+    }
+    // Volume Root and VERSION file exist.
     return VolumeState.NOT_INITIALIZED;
   }
 
@@ -321,11 +344,21 @@ public final class HddsVolume {
 
   /**
    * VolumeState represents the different states a HddsVolume can be in.
+   * NORMAL          => Volume can be used for storage
+   * FAILED          => Volume has failed and can no longer be used for
+   *                    storing containers.
+   * NON_EXISTENT    => Volume Root dir does not exist
+   * INCONSISTENT    => Volume Root dir is not empty but VERSION file is
+   *                    missing or Volume Root dir is not a directory
+   * NOT_FORMATTED   => Volume Root exists but not formatted (no VERSION file)
+   * NOT_INITIALIZED => VERSION file exists but has not been verified for
+   *                    correctness.
    */
   public enum VolumeState {
     NORMAL,
     FAILED,
     NON_EXISTENT,
+    INCONSISTENT,
     NOT_FORMATTED,
     NOT_INITIALIZED
   }
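
To make the layout described in the HddsVolume javadoc above concrete, here is a hedged sketch of how those paths compose on disk. The root path, container id and the "containerDir0" name are illustrative only; "chunks" is the data directory name used in the test further below:

    import java.io.File;
    import java.util.UUID;

    // Illustrative values; the real paths come from configuration and from SCM.
    File hddsRoot     = new File("/data/disk1", "hdds");       // ../hdds
    File versionFile  = new File(hddsRoot, "VERSION");         // ../hdds/VERSION
    String scmUuid    = UUID.randomUUID().toString();          // one directory per SCM
    File containerDir = new File(hddsRoot,
        scmUuid + "/current/containerDir0/" + 12);             // ../hdds/<scmUuid>/current/<containerDir>/<containerID>
    File metadataDir  = new File(containerDir, "metadata");    // container metadata
    File chunksDir    = new File(containerDir, "chunks");      // container data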

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5d44473/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
index 692a9d1..2dd4763 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
@@ -202,18 +202,19 @@ public class VolumeSet {
 
 
   // Add a volume to VolumeSet
-  public void addVolume(String dataDir) throws IOException {
-    addVolume(dataDir, StorageType.DEFAULT);
+  public boolean addVolume(String dataDir) {
+    return addVolume(dataDir, StorageType.DEFAULT);
   }
 
   // Add a volume to VolumeSet
-  public void addVolume(String volumeRoot, StorageType storageType)
-      throws IOException {
+  public boolean addVolume(String volumeRoot, StorageType storageType) {
     String hddsRoot = HddsVolumeUtil.getHddsRoot(volumeRoot);
+    boolean success;
 
     try (AutoCloseableLock lock = volumeSetLock.acquire()) {
       if (volumeMap.containsKey(hddsRoot)) {
         LOG.warn("Volume : {} already exists in VolumeMap", hddsRoot);
+        success = false;
       } else {
         if (failedVolumeMap.containsKey(hddsRoot)) {
           failedVolumeMap.remove(hddsRoot);
@@ -225,8 +226,13 @@ public class VolumeSet {
 
         LOG.info("Added Volume : {} to VolumeSet",
             hddsVolume.getHddsRootDir().getPath());
+        success = true;
       }
+    } catch (IOException ex) {
+      LOG.error("Failed to add volume " + volumeRoot + " to VolumeSet", ex);
+      success = false;
     }
+    return success;
   }
 
   // Mark a volume as failed
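
Because addVolume now reports success instead of throwing, a caller can react to a duplicate or inconsistent volume directly. A minimal usage sketch (the volume path, the datanodeUuid variable and the logger are illustrative, not part of the patch):

    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    OzoneConfiguration conf = new OzoneConfiguration();
    VolumeSet volumeSet = new VolumeSet(datanodeUuid, conf);
    if (!volumeSet.addVolume("/data/disk3", StorageType.DEFAULT)) {
      // Either the volume was already registered or its root is unusable
      // (for example the new INCONSISTENT state); VolumeSet has logged why.
      LOG.warn("Volume /data/disk3 was not added to the VolumeSet");
    }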

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5d44473/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index 41f75bd..4f75b9a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -18,22 +18,30 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
+import java.io.IOException;
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+
+import static org.apache.hadoop.ozone.container.common.volume.HddsVolume
+    .HDDS_VOLUME_DIR;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 
+import java.io.File;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
@@ -69,6 +77,28 @@ public class TestVolumeSet {
     initializeVolumeSet();
   }
 
+  @After
+  public void shutdown() throws IOException {
+    // Delete the hdds volume root dir
+    List<HddsVolume> volumes = new ArrayList<>();
+    volumes.addAll(volumeSet.getVolumesList());
+    volumes.addAll(volumeSet.getFailedVolumesList());
+
+    for (HddsVolume volume : volumes) {
+      FileUtils.deleteDirectory(volume.getHddsRootDir());
+    }
+  }
+
+  private boolean checkVolumeExistsInVolumeSet(String volume) {
+    for (HddsVolume hddsVolume : volumeSet.getVolumesList()) {
+      if (hddsVolume.getHddsRootDir().getPath().equals(
+          HddsVolumeUtil.getHddsRoot(volume))) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   @Test
   public void testVolumeSetInitialization() throws Exception {
 
@@ -84,14 +114,18 @@ public class TestVolumeSet {
   }
 
   @Test
-  public void testAddVolume() throws Exception {
+  public void testAddVolume() {
 
     assertEquals(2, volumeSet.getVolumesList().size());
 
     // Add a volume to VolumeSet
     String volume3 = baseDir + "disk3";
-    volumeSet.addVolume(volume3);
+//    File dir3 = new File(volume3, "hdds");
+//    File[] files = dir3.listFiles();
+//    System.out.println("------ " + files[0].getPath());
+    boolean success = volumeSet.addVolume(volume3);
 
+    assertTrue(success);
     assertEquals(3, volumeSet.getVolumesList().size());
     assertTrue("AddVolume did not add requested volume to VolumeSet",
         checkVolumeExistsInVolumeSet(volume3));
@@ -122,7 +156,6 @@ public class TestVolumeSet {
   @Test
   public void testRemoveVolume() throws Exception {
 
-    List<HddsVolume> volumesList = volumeSet.getVolumesList();
     assertEquals(2, volumeSet.getVolumesList().size());
 
     // Remove a volume from VolumeSet
@@ -141,13 +174,34 @@ public class TestVolumeSet {
         + expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
   }
 
-  private boolean checkVolumeExistsInVolumeSet(String volume) {
-    for (HddsVolume hddsVolume : volumeSet.getVolumesList()) {
-      if (hddsVolume.getHddsRootDir().getPath().equals(
-          HddsVolumeUtil.getHddsRoot(volume))) {
-        return true;
-      }
-    }
-    return false;
+  @Test
+  public void testVolumeInInconsistentState() throws Exception {
+    assertEquals(2, volumeSet.getVolumesList().size());
+
+    // Add a volume to VolumeSet
+    String volume3 = baseDir + "disk3";
+
+    // Create the root volume dir and create a sub-directory within it.
+    File newVolume = new File(volume3, HDDS_VOLUME_DIR);
+    System.out.println("new volume root: " + newVolume);
+    newVolume.mkdirs();
+    assertTrue("Failed to create new volume root", newVolume.exists());
+    File dataDir = new File(newVolume, "chunks");
+    dataDir.mkdirs();
+    assertTrue(dataDir.exists());
+
+    // The new volume is in an inconsistent state as the root dir is
+    // non-empty but the version file does not exist. Add Volume should
+    // return false.
+    boolean success = volumeSet.addVolume(volume3);
+
+    assertFalse(success);
+    assertEquals(2, volumeSet.getVolumesList().size());
+    assertTrue("AddVolume should fail for an inconsistent volume",
+        !checkVolumeExistsInVolumeSet(volume3));
+
+    // Delete volume3
+    File volume = new File(volume3);
+    FileUtils.deleteDirectory(volume);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5d44473/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 27c6528..284ffa3 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
@@ -60,21 +61,30 @@ public class TestOzoneContainer {
   public void setUp() throws Exception {
     conf = new OzoneConfiguration();
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.getRoot()
-        .getAbsolutePath() + "," + folder.newFolder().getAbsolutePath());
+        .getAbsolutePath());
     conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, folder.newFolder().getAbsolutePath());
+  }
+
+  @Test
+  public void testBuildContainerMap() throws Exception {
     volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
     volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
 
+    // Format the volumes
+    for (HddsVolume volume : volumeSet.getVolumesList()) {
+      volume.format(UUID.randomUUID().toString());
+    }
+
+    // Add containers to disk
     for (int i=0; i<10; i++) {
       keyValueContainerData = new KeyValueContainerData(i, 1);
       keyValueContainer = new KeyValueContainer(
           keyValueContainerData, conf);
       keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
     }
-  }
 
-  @Test
-  public void testBuildContainerMap() throws Exception {
+    // When OzoneContainer is started, the containers from disk should be
+    // loaded into the containerSet.
     OzoneContainer ozoneContainer = new
         OzoneContainer(datanodeDetails, conf);
     ContainerSet containerset = ozoneContainer.getContainerSet();




[42/50] hadoop git commit: YARN-7748. TestContainerResizing.testIncreaseContainerUnreservedWhenApplicationCompleted fails due to multiple container fail events. Contributed by Weiwei Yang.

Posted by in...@apache.org.
YARN-7748. TestContainerResizing.testIncreaseContainerUnreservedWhenApplicationCompleted fails due to multiple container fail events. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35ce6eb1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35ce6eb1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35ce6eb1

Branch: refs/heads/HADOOP-15461
Commit: 35ce6eb1f526ce3db7e015fb1761eee15604100c
Parents: 773d312
Author: Sunil G <su...@apache.org>
Authored: Tue Jul 24 22:20:06 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Tue Jul 24 22:20:17 2018 +0530

----------------------------------------------------------------------
 .../scheduler/capacity/TestContainerResizing.java | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35ce6eb1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
index eacbf6e..307d5ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent;
@@ -58,7 +59,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica
     .FiCaSchedulerNode;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.CandidateNodeSet;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -740,11 +740,14 @@ public class TestContainerResizing {
   @Test
   public void testIncreaseContainerUnreservedWhenApplicationCompleted()
       throws Exception {
+    // Disable relaunch app attempt on failure, in order to check
+    // resource usages for current app only.
+    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
     /**
      * Similar to testIncreaseContainerUnreservedWhenContainerCompleted, when
      * application finishes, reserved increase container should be cancelled
      */
-    MockRM rm1 = new MockRM() {
+    MockRM rm1 = new MockRM(conf) {
       @Override
       public RMNodeLabelsManager createNodeLabelManager() {
         return mgr;
@@ -807,9 +810,14 @@ public class TestContainerResizing {
     Assert.assertEquals(6 * GB,
         app.getAppAttemptResourceUsage().getReserved().getMemorySize());
 
-    // Kill the application
-    cs.handle(new AppAttemptRemovedSchedulerEvent(am1.getApplicationAttemptId(),
-        RMAppAttemptState.KILLED, false));
+    // Kill the application by killing the AM container
+    ContainerId amContainer =
+        ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
+    cs.killContainer(cs.getRMContainer(amContainer));
+    rm1.waitForState(am1.getApplicationAttemptId(),
+        RMAppAttemptState.FAILED);
+    rm1.waitForState(am1.getApplicationAttemptId().getApplicationId(),
+        RMAppState.FAILED);
 
     /* Check statuses after reservation satisfied */
     // Increase request should be unreserved




[17/50] hadoop git commit: HDDS-264. 'oz' subcommand reference is not present in 'ozone' command help. Contributed by Sandeep Nemuri.

Posted by in...@apache.org.
HDDS-264. 'oz' subcommand reference is not present in 'ozone' command help. Contributed by Sandeep Nemuri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c19ee39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c19ee39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c19ee39

Branch: refs/heads/HADOOP-15461
Commit: 5c19ee3994af06bbc85f3575e3e4421babc0ba5c
Parents: 68b57ad
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Fri Jul 20 22:12:40 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Fri Jul 20 22:13:09 2018 +0530

----------------------------------------------------------------------
 hadoop-ozone/common/src/main/bin/ozone | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c19ee39/hadoop-ozone/common/src/main/bin/ozone
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone
index 9495eff..927bc84 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -41,7 +41,7 @@ function hadoop_usage
   hadoop_add_subcommand "getozoneconf" client "get ozone config values from configuration"
   hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode."
   hadoop_add_subcommand "om" daemon "Ozone Manager"
-  hadoop_add_subcommand "o3" client "command line interface for ozone"
+  hadoop_add_subcommand "oz" client "command line interface for ozone"
   hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata into relational data"
   hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service"
   hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager"




[14/50] hadoop git commit: YARN-8528. Final states in ContainerAllocation might be modified externally causing unexpected allocation results. Contributed by Xintong Song.

Posted by in...@apache.org.
YARN-8528. Final states in ContainerAllocation might be modified externally causing unexpected allocation results. Contributed by Xintong Song.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbf20264
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbf20264
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbf20264

Branch: refs/heads/HADOOP-15461
Commit: cbf20264838f536382a9d8c4cd2144faf6875c3a
Parents: 7b25fb9
Author: Weiwei Yang <ww...@apache.org>
Authored: Fri Jul 20 22:32:11 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Fri Jul 20 22:34:06 2018 +0800

----------------------------------------------------------------------
 .../capacity/allocator/ContainerAllocation.java |  2 +-
 .../allocator/RegularContainerAllocator.java    | 10 ++--
 .../capacity/TestCapacityScheduler.java         | 48 ++++++++++++++++++++
 3 files changed, 54 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbf20264/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java
index f408508..b9b9bcf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java
@@ -56,7 +56,7 @@ public class ContainerAllocation {
 
   RMContainer containerToBeUnreserved;
   private Resource resourceToBeAllocated = Resources.none();
-  AllocationState state;
+  private AllocationState state;
   NodeType containerNodeType = NodeType.NODE_LOCAL;
   NodeType requestLocalityType = null;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbf20264/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
index 99a5b84..8f49b41 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
@@ -263,7 +263,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
             reservedContainer, schedulingMode, resourceLimits);
     
     if (null == reservedContainer) {
-      if (result.state == AllocationState.PRIORITY_SKIPPED) {
+      if (result.getAllocationState() == AllocationState.PRIORITY_SKIPPED) {
         // Don't count 'skipped nodes' as a scheduling opportunity!
         application.subtractSchedulingOpportunity(schedulerKey);
       }
@@ -487,8 +487,8 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
 
       // When a returned allocation is LOCALITY_SKIPPED, since we're in
       // off-switch request now, we will skip this app w.r.t priorities 
-      if (allocation.state == AllocationState.LOCALITY_SKIPPED) {
-        allocation.state = AllocationState.APP_SKIPPED;
+      if (allocation.getAllocationState() == AllocationState.LOCALITY_SKIPPED) {
+        allocation = ContainerAllocation.APP_SKIPPED;
       }
       allocation.requestLocalityType = requestLocalityType;
 
@@ -836,8 +836,8 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       result = tryAllocateOnNode(clusterResource, node, schedulingMode,
           resourceLimits, schedulerKey, reservedContainer);
 
-      if (AllocationState.ALLOCATED == result.state
-          || AllocationState.RESERVED == result.state) {
+      if (AllocationState.ALLOCATED == result.getAllocationState()
+          || AllocationState.RESERVED == result.getAllocationState()) {
         result = doAllocation(result, node, schedulerKey, reservedContainer);
         break;
       }
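
The hazard being fixed is plain object aliasing: the shared ContainerAllocation constants were mutated through a returned reference, so a single skipped allocation could corrupt the constant seen by every later scheduling decision. A stripped-down, hypothetical illustration of the same bug class (made-up names, not Hadoop code):

    // Mini-example of the aliasing bug that YARN-8528 guards against.
    final class Allocation {
      static final Allocation LOCALITY_SKIPPED = new Allocation("LOCALITY_SKIPPED");
      static final Allocation APP_SKIPPED      = new Allocation("APP_SKIPPED");
      String state;                             // mutable field exposed on shared constants
      Allocation(String state) { this.state = state; }

      public static void main(String[] args) {
        Allocation result = LOCALITY_SKIPPED;   // handed back by some helper
        result.state = "APP_SKIPPED";           // pre-fix pattern: mutates the shared constant
        System.out.println(LOCALITY_SKIPPED.state);   // now prints APP_SKIPPED for everyone
        // The patch instead makes the field private and reassigns the local
        // reference (result = ContainerAllocation.APP_SKIPPED), leaving the
        // shared constants untouched.
      }
    }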

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbf20264/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 0b54010..79cdcfe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -134,6 +134,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator.AllocationState;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator.ContainerAllocation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
@@ -4930,4 +4932,50 @@ public class TestCapacityScheduler extends CapacitySchedulerTestBase {
     spyCs.handle(new NodeUpdateSchedulerEvent(
         spyCs.getNode(nm.getNodeId()).getRMNode()));
   }
+
+  // Testcase for YARN-8528
+  // This is to test whether ContainerAllocation constants are holding correct
+  // values during scheduling.
+  @Test
+  public void testContainerAllocationLocalitySkipped() throws Exception {
+    Assert.assertEquals(AllocationState.APP_SKIPPED,
+        ContainerAllocation.APP_SKIPPED.getAllocationState());
+    Assert.assertEquals(AllocationState.LOCALITY_SKIPPED,
+        ContainerAllocation.LOCALITY_SKIPPED.getAllocationState());
+    Assert.assertEquals(AllocationState.PRIORITY_SKIPPED,
+        ContainerAllocation.PRIORITY_SKIPPED.getAllocationState());
+    Assert.assertEquals(AllocationState.QUEUE_SKIPPED,
+        ContainerAllocation.QUEUE_SKIPPED.getAllocationState());
+
+    // init RM & NMs & Nodes
+    final MockRM rm = new MockRM(new CapacitySchedulerConfiguration());
+    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+    rm.start();
+    final MockNM nm1 = rm.registerNode("h1:1234", 4 * GB);
+    final MockNM nm2 = rm.registerNode("h2:1234", 6 * GB); // maximum-allocation-mb = 6GB
+
+    // submit app and request resource
+    // container2 is larger than nm1 total resource, will trigger locality skip
+    final RMApp app = rm.submitApp(1 * GB, "app", "user");
+    final MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1);
+    am.addRequests(new String[] {"*"}, 5 * GB, 1, 1, 2);
+    am.schedule();
+
+    // container1 (am) should be acquired, container2 should not
+    RMNode node1 = rm.getRMContext().getRMNodes().get(nm1.getNodeId());
+    cs.handle(new NodeUpdateSchedulerEvent(node1));
+    ContainerId cid = ContainerId.newContainerId(am.getApplicationAttemptId(), 1l);
+    Assert.assertEquals(cs.getRMContainer(cid).getState(), RMContainerState.ACQUIRED);
+    cid = ContainerId.newContainerId(am.getApplicationAttemptId(), 2l);
+    Assert.assertNull(cs.getRMContainer(cid));
+
+    Assert.assertEquals(AllocationState.APP_SKIPPED,
+        ContainerAllocation.APP_SKIPPED.getAllocationState());
+    Assert.assertEquals(AllocationState.LOCALITY_SKIPPED,
+        ContainerAllocation.LOCALITY_SKIPPED.getAllocationState());
+    Assert.assertEquals(AllocationState.PRIORITY_SKIPPED,
+        ContainerAllocation.PRIORITY_SKIPPED.getAllocationState());
+    Assert.assertEquals(AllocationState.QUEUE_SKIPPED,
+        ContainerAllocation.QUEUE_SKIPPED.getAllocationState());
+  }
 }




[33/50] hadoop git commit: YARN-6966. NodeManager metrics may return wrong negative values when NM restart. (Szilard Nemeth via Haibo Chen)

Posted by in...@apache.org.
YARN-6966. NodeManager metrics may return wrong negative values when NM restart. (Szilard Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d3c39e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d3c39e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d3c39e9

Branch: refs/heads/HADOOP-15461
Commit: 9d3c39e9dd88b8f32223c01328581bb68507d415
Parents: 3a9e25e
Author: Haibo Chen <ha...@apache.org>
Authored: Mon Jul 23 11:06:44 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Mon Jul 23 11:07:24 2018 -0700

----------------------------------------------------------------------
 .../containermanager/ContainerManagerImpl.java  |  2 +-
 .../scheduler/ContainerScheduler.java           | 16 ++++--
 .../recovery/NMLeveldbStateStoreService.java    | 32 ++++++-----
 .../recovery/NMNullStateStoreService.java       |  2 +-
 .../recovery/NMStateStoreService.java           |  3 +-
 .../BaseContainerManagerTest.java               |  2 +-
 .../TestContainerManagerRecovery.java           | 57 ++++++++++++++++++++
 .../TestContainerSchedulerRecovery.java         | 46 +++++++++++-----
 .../metrics/TestNodeManagerMetrics.java         |  4 +-
 .../recovery/NMMemoryStateStoreService.java     | 16 +++++-
 .../TestNMLeveldbStateStoreService.java         | 21 +++++++-
 11 files changed, 163 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c39e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index ad63720..89bef8f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -496,7 +496,7 @@ public class ContainerManagerImpl extends CompositeService implements
     Container container = new ContainerImpl(getConfig(), dispatcher,
         launchContext, credentials, metrics, token, context, rcs);
     context.getContainers().put(token.getContainerID(), container);
-    containerScheduler.recoverActiveContainer(container, rcs.getStatus());
+    containerScheduler.recoverActiveContainer(container, rcs);
     app.handle(new ApplicationContainerInitEvent(container));
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c39e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
index 5cdcf41..a61b9d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
@@ -44,6 +44,9 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.Contai
 
 
 import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService
+        .RecoveredContainerState;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerStatus;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -259,11 +262,11 @@ public class ContainerScheduler extends AbstractService implements
    * @param rcs Recovered Container status
    */
   public void recoverActiveContainer(Container container,
-      RecoveredContainerStatus rcs) {
+      RecoveredContainerState rcs) {
     ExecutionType execType =
         container.getContainerTokenIdentifier().getExecutionType();
-    if (rcs == RecoveredContainerStatus.QUEUED
-        || rcs == RecoveredContainerStatus.PAUSED) {
+    if (rcs.getStatus() == RecoveredContainerStatus.QUEUED
+        || rcs.getStatus() == RecoveredContainerStatus.PAUSED) {
       if (execType == ExecutionType.GUARANTEED) {
         queuedGuaranteedContainers.put(container.getContainerId(), container);
       } else if (execType == ExecutionType.OPPORTUNISTIC) {
@@ -274,10 +277,15 @@ public class ContainerScheduler extends AbstractService implements
             "UnKnown execution type received " + container.getContainerId()
                 + ", execType " + execType);
       }
-    } else if (rcs == RecoveredContainerStatus.LAUNCHED) {
+    } else if (rcs.getStatus() == RecoveredContainerStatus.LAUNCHED) {
       runningContainers.put(container.getContainerId(), container);
       utilizationTracker.addContainerResources(container);
     }
+    if (rcs.getStatus() != RecoveredContainerStatus.COMPLETED
+            && rcs.getCapability() != null) {
+      metrics.launchedContainer();
+      metrics.allocateContainer(rcs.getCapability());
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c39e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index 6f643b0..44f5e18 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings;
 import org.apache.hadoop.yarn.server.records.Version;
 import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.fusesource.leveldbjni.JniDBFactory;
@@ -237,7 +238,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
       iter.seek(bytes(CONTAINERS_KEY_PREFIX));
 
       while (iter.hasNext()) {
-        Entry<byte[],byte[]> entry = iter.peekNext();
+        Entry<byte[], byte[]> entry = iter.peekNext();
         String key = asString(entry.getKey());
         if (!key.startsWith(CONTAINERS_KEY_PREFIX)) {
           break;
@@ -299,6 +300,10 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
       if (suffix.equals(CONTAINER_REQUEST_KEY_SUFFIX)) {
         rcs.startRequest = new StartContainerRequestPBImpl(
             StartContainerRequestProto.parseFrom(entry.getValue()));
+        ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
+            .newContainerTokenIdentifier(rcs.startRequest.getContainerToken());
+        rcs.capability = new ResourcePBImpl(
+            containerTokenIdentifier.getProto().getResource());
       } else if (suffix.equals(CONTAINER_VERSION_KEY_SUFFIX)) {
         rcs.version = Integer.parseInt(asString(entry.getValue()));
       } else if (suffix.equals(CONTAINER_START_TIME_KEY_SUFFIX)) {
@@ -382,24 +387,25 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
       LOG.debug("storeContainer: containerId= " + idStr
           + ", startRequest= " + startRequest);
     }
-    String keyRequest = getContainerKey(idStr, CONTAINER_REQUEST_KEY_SUFFIX);
-    String keyVersion = getContainerVersionKey(idStr);
-    String keyStartTime =
+    final String keyVersion = getContainerVersionKey(idStr);
+    final String keyRequest =
+        getContainerKey(idStr, CONTAINER_REQUEST_KEY_SUFFIX);
+    final StartContainerRequestProto startContainerRequest =
+        ((StartContainerRequestPBImpl) startRequest).getProto();
+
+    final String keyStartTime =
         getContainerKey(idStr, CONTAINER_START_TIME_KEY_SUFFIX);
+    final String startTimeValue = Long.toString(startTime);
+
     try {
-      WriteBatch batch = db.createWriteBatch();
-      try {
-        batch.put(bytes(keyRequest),
-            ((StartContainerRequestPBImpl) startRequest).getProto().
-                toByteArray());
-        batch.put(bytes(keyStartTime), bytes(Long.toString(startTime)));
+      try (WriteBatch batch = db.createWriteBatch()) {
+        batch.put(bytes(keyRequest), startContainerRequest.toByteArray());
+        batch.put(bytes(keyStartTime), bytes(startTimeValue));
         if (containerVersion != 0) {
           batch.put(bytes(keyVersion),
-              bytes(Integer.toString(containerVersion)));
+                  bytes(Integer.toString(containerVersion)));
         }
         db.write(batch);
-      } finally {
-        batch.close();
       }
     } catch (DBException e) {
       markStoreUnHealthy(e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c39e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
index f217f2f..dfad9cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
@@ -73,7 +73,7 @@ public class NMNullStateStoreService extends NMStateStoreService {
 
   @Override
   public void storeContainer(ContainerId containerId, int version,
-      long startTime, StartContainerRequest startRequest) throws IOException {
+      long startTime, StartContainerRequest startRequest) {
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c39e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
index 0ea0ef3..70decdb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
@@ -416,7 +416,8 @@ public abstract class NMStateStoreService extends AbstractService {
    * @throws IOException
    */
   public abstract void storeContainer(ContainerId containerId,
-      int containerVersion, long startTime, StartContainerRequest startRequest)
+          int containerVersion, long startTime,
+          StartContainerRequest startRequest)
       throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c39e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index b31601c..493aa4c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -107,7 +107,7 @@ public abstract class BaseContainerManagerTest {
   protected static File remoteLogDir;
   protected static File tmpDir;
 
-  protected final NodeManagerMetrics metrics = NodeManagerMetrics.create();
+  protected NodeManagerMetrics metrics = NodeManagerMetrics.create();
 
   public BaseContainerManagerTest() throws UnsupportedFileSystemException {
     localFS = FileContext.getLocalFSFileContext();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c39e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
index 0a834af..a144adf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -106,6 +107,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.Contai
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler.ContainerScheduler;
 
 import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+import org.apache.hadoop.yarn.server.nodemanager.metrics.TestNodeManagerMetrics;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
@@ -401,6 +403,61 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest {
   }
 
   @Test
+  public void testNodeManagerMetricsRecovery() throws Exception {
+    conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
+    conf.setBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, true);
+
+    NMStateStoreService stateStore = new NMMemoryStateStoreService();
+    stateStore.init(conf);
+    stateStore.start();
+    Context context = createContext(conf, stateStore);
+    ContainerManagerImpl cm = createContainerManager(context, delSrvc);
+    cm.init(conf);
+    cm.start();
+    metrics.addResource(Resource.newInstance(10240, 8));
+
+    // add an application by starting a container
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
+    ContainerId cid = ContainerId.newContainerId(attemptId, 1);
+    Map<String, String> containerEnv = Collections.emptyMap();
+    Map<String, ByteBuffer> serviceData = Collections.emptyMap();
+    Map<String, LocalResource> localResources = Collections.emptyMap();
+    List<String> commands = Arrays.asList("sleep 60s".split(" "));
+    ContainerLaunchContext clc = ContainerLaunchContext.newInstance(
+        localResources, containerEnv, commands, serviceData,
+        null, null);
+    StartContainersResponse startResponse = startContainer(context, cm, cid,
+        clc, null, ContainerType.TASK);
+    assertTrue(startResponse.getFailedRequests().isEmpty());
+    assertEquals(1, context.getApplications().size());
+    Application app = context.getApplications().get(appId);
+    assertNotNull(app);
+
+    // make sure the container reaches RUNNING state
+    waitForNMContainerState(cm, cid,
+        org.apache.hadoop.yarn.server.nodemanager
+            .containermanager.container.ContainerState.RUNNING);
+    TestNodeManagerMetrics.checkMetrics(1, 0, 0, 0, 0, 1, 1, 1, 9, 1, 7);
+
+    // restart and verify metrics could be recovered
+    cm.stop();
+    DefaultMetricsSystem.shutdown();
+    metrics = NodeManagerMetrics.create();
+    metrics.addResource(Resource.newInstance(10240, 8));
+    TestNodeManagerMetrics.checkMetrics(0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 8);
+    context = createContext(conf, stateStore);
+    cm = createContainerManager(context, delSrvc);
+    cm.init(conf);
+    cm.start();
+    assertEquals(1, context.getApplications().size());
+    app = context.getApplications().get(appId);
+    assertNotNull(app);
+    TestNodeManagerMetrics.checkMetrics(1, 0, 0, 0, 0, 1, 1, 1, 9, 1, 7);
+    cm.stop();
+  }
+
+  @Test
   public void testContainerResizeRecovery() throws Exception {
     conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
     conf.setBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c39e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerRecovery.java
index 2ae8b97..6b3ac67 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerRecovery.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerRecovery.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
 
 import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.doNothing;
@@ -31,6 +32,8 @@ import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
 import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService
+        .RecoveredContainerState;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerStatus;
 import org.junit.After;
 import org.junit.Before;
@@ -71,6 +74,13 @@ public class TestContainerSchedulerRecovery {
 
   private ContainerScheduler spy;
 
+  private RecoveredContainerState createRecoveredContainerState(
+      RecoveredContainerStatus status) {
+    RecoveredContainerState mockState = mock(RecoveredContainerState.class);
+    when(mockState.getStatus()).thenReturn(status);
+    return mockState;
+  }
+
   @Before public void setUp() throws Exception {
     MockitoAnnotations.initMocks(this);
     spy = spy(tempContainerScheduler);
@@ -94,7 +104,8 @@ public class TestContainerSchedulerRecovery {
     assertEquals(0, spy.getNumQueuedGuaranteedContainers());
     assertEquals(0, spy.getNumQueuedOpportunisticContainers());
     assertEquals(0, spy.getNumRunningContainers());
-    RecoveredContainerStatus rcs = RecoveredContainerStatus.QUEUED;
+    RecoveredContainerState rcs =
+            createRecoveredContainerState(RecoveredContainerStatus.QUEUED);
     when(token.getExecutionType()).thenReturn(ExecutionType.GUARANTEED);
     when(container.getContainerTokenIdentifier()).thenReturn(token);
     spy.recoverActiveContainer(container, rcs);
@@ -113,7 +124,8 @@ public class TestContainerSchedulerRecovery {
     assertEquals(0, spy.getNumQueuedGuaranteedContainers());
     assertEquals(0, spy.getNumQueuedOpportunisticContainers());
     assertEquals(0, spy.getNumRunningContainers());
-    RecoveredContainerStatus rcs = RecoveredContainerStatus.QUEUED;
+    RecoveredContainerState rcs =
+            createRecoveredContainerState(RecoveredContainerStatus.QUEUED);
     when(token.getExecutionType()).thenReturn(ExecutionType.OPPORTUNISTIC);
     when(container.getContainerTokenIdentifier()).thenReturn(token);
     spy.recoverActiveContainer(container, rcs);
@@ -132,7 +144,8 @@ public class TestContainerSchedulerRecovery {
     assertEquals(0, spy.getNumQueuedGuaranteedContainers());
     assertEquals(0, spy.getNumQueuedOpportunisticContainers());
     assertEquals(0, spy.getNumRunningContainers());
-    RecoveredContainerStatus rcs = RecoveredContainerStatus.PAUSED;
+    RecoveredContainerState rcs =
+        createRecoveredContainerState(RecoveredContainerStatus.PAUSED);
     when(token.getExecutionType()).thenReturn(ExecutionType.GUARANTEED);
     when(container.getContainerTokenIdentifier()).thenReturn(token);
     spy.recoverActiveContainer(container, rcs);
@@ -151,7 +164,8 @@ public class TestContainerSchedulerRecovery {
     assertEquals(0, spy.getNumQueuedGuaranteedContainers());
     assertEquals(0, spy.getNumQueuedOpportunisticContainers());
     assertEquals(0, spy.getNumRunningContainers());
-    RecoveredContainerStatus rcs = RecoveredContainerStatus.PAUSED;
+    RecoveredContainerState rcs =
+            createRecoveredContainerState(RecoveredContainerStatus.PAUSED);
     when(token.getExecutionType()).thenReturn(ExecutionType.OPPORTUNISTIC);
     when(container.getContainerTokenIdentifier()).thenReturn(token);
     spy.recoverActiveContainer(container, rcs);
@@ -170,7 +184,8 @@ public class TestContainerSchedulerRecovery {
     assertEquals(0, spy.getNumQueuedGuaranteedContainers());
     assertEquals(0, spy.getNumQueuedOpportunisticContainers());
     assertEquals(0, spy.getNumRunningContainers());
-    RecoveredContainerStatus rcs = RecoveredContainerStatus.LAUNCHED;
+    RecoveredContainerState rcs =
+            createRecoveredContainerState(RecoveredContainerStatus.LAUNCHED);
     when(token.getExecutionType()).thenReturn(ExecutionType.GUARANTEED);
     when(container.getContainerTokenIdentifier()).thenReturn(token);
     spy.recoverActiveContainer(container, rcs);
@@ -189,7 +204,8 @@ public class TestContainerSchedulerRecovery {
     assertEquals(0, spy.getNumQueuedGuaranteedContainers());
     assertEquals(0, spy.getNumQueuedOpportunisticContainers());
     assertEquals(0, spy.getNumRunningContainers());
-    RecoveredContainerStatus rcs = RecoveredContainerStatus.LAUNCHED;
+    RecoveredContainerState rcs =
+            createRecoveredContainerState(RecoveredContainerStatus.LAUNCHED);
     when(token.getExecutionType()).thenReturn(ExecutionType.OPPORTUNISTIC);
     when(container.getContainerTokenIdentifier()).thenReturn(token);
     spy.recoverActiveContainer(container, rcs);
@@ -208,7 +224,8 @@ public class TestContainerSchedulerRecovery {
     assertEquals(0, spy.getNumQueuedGuaranteedContainers());
     assertEquals(0, spy.getNumQueuedOpportunisticContainers());
     assertEquals(0, spy.getNumRunningContainers());
-    RecoveredContainerStatus rcs = RecoveredContainerStatus.REQUESTED;
+    RecoveredContainerState rcs =
+            createRecoveredContainerState(RecoveredContainerStatus.REQUESTED);
     when(token.getExecutionType()).thenReturn(ExecutionType.GUARANTEED);
     when(container.getContainerTokenIdentifier()).thenReturn(token);
     spy.recoverActiveContainer(container, rcs);
@@ -227,7 +244,8 @@ public class TestContainerSchedulerRecovery {
     assertEquals(0, spy.getNumQueuedGuaranteedContainers());
     assertEquals(0, spy.getNumQueuedOpportunisticContainers());
     assertEquals(0, spy.getNumRunningContainers());
-    RecoveredContainerStatus rcs = RecoveredContainerStatus.REQUESTED;
+    RecoveredContainerState rcs =
+            createRecoveredContainerState(RecoveredContainerStatus.REQUESTED);
     when(token.getExecutionType()).thenReturn(ExecutionType.OPPORTUNISTIC);
     when(container.getContainerTokenIdentifier()).thenReturn(token);
     spy.recoverActiveContainer(container, rcs);
@@ -246,7 +264,8 @@ public class TestContainerSchedulerRecovery {
     assertEquals(0, spy.getNumQueuedGuaranteedContainers());
     assertEquals(0, spy.getNumQueuedOpportunisticContainers());
     assertEquals(0, spy.getNumRunningContainers());
-    RecoveredContainerStatus rcs = RecoveredContainerStatus.COMPLETED;
+    RecoveredContainerState rcs =
+            createRecoveredContainerState(RecoveredContainerStatus.COMPLETED);
     when(token.getExecutionType()).thenReturn(ExecutionType.GUARANTEED);
     when(container.getContainerTokenIdentifier()).thenReturn(token);
     spy.recoverActiveContainer(container, rcs);
@@ -265,7 +284,8 @@ public class TestContainerSchedulerRecovery {
     assertEquals(0, spy.getNumQueuedGuaranteedContainers());
     assertEquals(0, spy.getNumQueuedOpportunisticContainers());
     assertEquals(0, spy.getNumRunningContainers());
-    RecoveredContainerStatus rcs = RecoveredContainerStatus.COMPLETED;
+    RecoveredContainerState rcs =
+            createRecoveredContainerState(RecoveredContainerStatus.COMPLETED);
     when(token.getExecutionType()).thenReturn(ExecutionType.OPPORTUNISTIC);
     when(container.getContainerTokenIdentifier()).thenReturn(token);
     spy.recoverActiveContainer(container, rcs);
@@ -284,7 +304,8 @@ public class TestContainerSchedulerRecovery {
     assertEquals(0, spy.getNumQueuedGuaranteedContainers());
     assertEquals(0, spy.getNumQueuedOpportunisticContainers());
     assertEquals(0, spy.getNumRunningContainers());
-    RecoveredContainerStatus rcs = RecoveredContainerStatus.QUEUED;
+    RecoveredContainerState rcs =
+            createRecoveredContainerState(RecoveredContainerStatus.QUEUED);
     when(container.getContainerTokenIdentifier()).thenReturn(token);
     spy.recoverActiveContainer(container, rcs);
     assertEquals(0, spy.getNumQueuedGuaranteedContainers());
@@ -302,7 +323,8 @@ public class TestContainerSchedulerRecovery {
     assertEquals(0, spy.getNumQueuedGuaranteedContainers());
     assertEquals(0, spy.getNumQueuedOpportunisticContainers());
     assertEquals(0, spy.getNumRunningContainers());
-    RecoveredContainerStatus rcs = RecoveredContainerStatus.PAUSED;
+    RecoveredContainerState rcs =
+            createRecoveredContainerState(RecoveredContainerStatus.PAUSED);
     when(container.getContainerTokenIdentifier()).thenReturn(token);
     spy.recoverActiveContainer(container, rcs);
     assertEquals(0, spy.getNumQueuedGuaranteedContainers());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c39e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java
index d21e7ad..c5f80ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java
@@ -113,8 +113,8 @@ public class TestNodeManagerMetrics {
     assertGauge("AvailableVCores", 19, rb);
   }
 
-  private void checkMetrics(int launched, int completed, int failed, int killed,
-      int initing, int running, int allocatedGB,
+  public static void checkMetrics(int launched, int completed, int failed,
+      int killed, int initing, int running, int allocatedGB,
       int allocatedContainers, int availableGB, int allocatedVCores,
       int availableVCores) {
     MetricsRecordBuilder rb = getMetrics("NodeManagerMetrics");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c39e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
index b67d11f..c5428d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
@@ -45,6 +46,9 @@ import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings;
 
+
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+
 public class NMMemoryStateStoreService extends NMStateStoreService {
   private Map<ApplicationId, ContainerManagerApplicationProto> apps;
   private Map<ContainerId, RecoveredContainerState> containerStates;
@@ -132,11 +136,19 @@ public class NMMemoryStateStoreService extends NMStateStoreService {
 
   @Override
   public synchronized void storeContainer(ContainerId containerId,
-      int version, long startTime, StartContainerRequest startRequest)
-      throws IOException {
+      int version, long startTime, StartContainerRequest startRequest) {
     RecoveredContainerState rcs = new RecoveredContainerState();
     rcs.startRequest = startRequest;
     rcs.version = version;
+    try {
+      ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
+          .newContainerTokenIdentifier(startRequest.getContainerToken());
+      rcs.capability =
+          new ResourcePBImpl(containerTokenIdentifier.getProto().getResource());
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+
     rcs.setStartTime(startTime);
     containerStates.put(containerId, rcs);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c39e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
index 265b3e6..c8c07d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
@@ -238,7 +238,9 @@ public class TestNMLeveldbStateStoreService {
     ApplicationAttemptId appAttemptId =
         ApplicationAttemptId.newInstance(appId, 4);
     ContainerId containerId = ContainerId.newContainerId(appAttemptId, 5);
-    StartContainerRequest containerReq = createContainerRequest(containerId);
+    Resource containerResource = Resource.newInstance(1024, 2);
+    StartContainerRequest containerReq =
+        createContainerRequest(containerId, containerResource);
 
     // store a container and verify recovered
     long containerStartTime = System.currentTimeMillis();
@@ -260,6 +262,7 @@ public class TestNMLeveldbStateStoreService {
     assertEquals(false, rcs.getKilled());
     assertEquals(containerReq, rcs.getStartRequest());
     assertTrue(rcs.getDiagnostics().isEmpty());
+    assertEquals(containerResource, rcs.getCapability());
 
     // store a new container record without StartContainerRequest
     ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 6);
@@ -279,6 +282,7 @@ public class TestNMLeveldbStateStoreService {
     assertEquals(false, rcs.getKilled());
     assertEquals(containerReq, rcs.getStartRequest());
     assertTrue(rcs.getDiagnostics().isEmpty());
+    assertEquals(containerResource, rcs.getCapability());
 
     // launch the container, add some diagnostics, and verify recovered
     StringBuilder diags = new StringBuilder();
@@ -294,6 +298,7 @@ public class TestNMLeveldbStateStoreService {
     assertEquals(false, rcs.getKilled());
     assertEquals(containerReq, rcs.getStartRequest());
     assertEquals(diags.toString(), rcs.getDiagnostics());
+    assertEquals(containerResource, rcs.getCapability());
 
     // pause the container, and verify recovered
     stateStore.storeContainerPaused(containerId);
@@ -395,7 +400,17 @@ public class TestNMLeveldbStateStoreService {
   }
 
   private StartContainerRequest createContainerRequest(
+          ContainerId containerId, Resource res) {
+    return createContainerRequestInternal(containerId, res);
+  }
+
+  private StartContainerRequest createContainerRequest(
       ContainerId containerId) {
+    return createContainerRequestInternal(containerId, null);
+  }
+
+  private StartContainerRequest createContainerRequestInternal(ContainerId
+          containerId, Resource res) {
     LocalResource lrsrc = LocalResource.newInstance(
         URL.newInstance("hdfs", "somehost", 12345, "/some/path/to/rsrc"),
         LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, 123L,
@@ -421,6 +436,10 @@ public class TestNMLeveldbStateStoreService {
         localResources, env, containerCmds, serviceData, containerTokens,
         acls);
     Resource containerRsrc = Resource.newInstance(1357, 3);
+
+    if (res != null) {
+      containerRsrc = res;
+    }
     ContainerTokenIdentifier containerTokenId =
         new ContainerTokenIdentifier(containerId, "host", "user",
             containerRsrc, 9876543210L, 42, 2468, Priority.newInstance(7),
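
A minimal sketch of the idea behind the state-store changes above: the Resource capability granted to a container is already encoded in the container token carried by its StartContainerRequest, so a state store can decode it when the container is stored instead of persisting the Resource separately. The class and method names below are illustrative only; the decoding calls mirror the NMMemoryStateStoreService hunk above.

import java.io.IOException;

import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;

public final class CapabilityFromStartRequest {

  private CapabilityFromStartRequest() {
  }

  /**
   * Decodes the Resource the RM granted to a container from the container
   * token carried by its StartContainerRequest, so a state store can persist
   * and later recover the capability without storing the Resource separately.
   */
  public static Resource capabilityOf(StartContainerRequest request)
      throws IOException {
    ContainerTokenIdentifier ident =
        BuilderUtils.newContainerTokenIdentifier(request.getContainerToken());
    // Same decoding as the NMMemoryStateStoreService hunk above.
    return new ResourcePBImpl(ident.getProto().getResource());
  }
}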




[03/50] hadoop git commit: HDDS-207. ozone listVolume command accepts random values as argument. Contributed by Lokesh Jain.

Posted by in...@apache.org.
HDDS-207. ozone listVolume command accepts random values as argument. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/129269f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/129269f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/129269f9

Branch: refs/heads/HADOOP-15461
Commit: 129269f98926775ccb5046d9dd41b58f1013211d
Parents: d5d4447
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed Jul 18 11:05:42 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Wed Jul 18 11:05:42 2018 -0700

----------------------------------------------------------------------
 .../src/test/acceptance/basic/ozone-shell.robot        |  8 +++++---
 .../apache/hadoop/ozone/ozShell/TestOzoneShell.java    | 12 ++++++++++--
 .../org/apache/hadoop/ozone/web/ozShell/Shell.java     |  1 +
 .../ozone/web/ozShell/volume/ListVolumeHandler.java    | 13 ++++++++++++-
 4 files changed, 28 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/129269f9/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
index f4be3e0..cc4b035 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
@@ -52,7 +52,9 @@ Test ozone shell
     ${result} =     Execute on          datanode        ozone oz -createVolume ${protocol}${server}/${volume} -user bilbo -quota 100TB -root
                     Should not contain  ${result}       Failed
                     Should contain      ${result}       Creating Volume: ${volume}
-    ${result} =     Execute on          datanode        ozone oz -listVolume o3://ozoneManager -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")'
+    ${result} =     Execute on          datanode        ozone oz -listVolume ${protocol}${server}/ -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")'
+                    Should contain      ${result}       createdOn
+    ${result} =     Execute on          datanode        ozone oz -listVolume -user bilbo | grep -Ev 'Removed|DEBUG|ERROR|INFO|TRACE|WARN' | jq -r '.[] | select(.volumeName=="${volume}")'
                     Should contain      ${result}       createdOn
                     Execute on          datanode        ozone oz -updateVolume ${protocol}${server}/${volume} -user bill -quota 10TB
     ${result} =     Execute on          datanode        ozone oz -infoVolume ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name'
@@ -66,7 +68,7 @@ Test ozone shell
                     Should Be Equal     ${result}       GROUP
     ${result} =     Execute on          datanode        ozone oz -updateBucket ${protocol}${server}/${volume}/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
                     Should Be Equal     ${result}       USER
-    ${result} =     Execute on          datanode        ozone oz -listBucket o3://ozoneManager/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+    ${result} =     Execute on          datanode        ozone oz -listBucket ${protocol}${server}/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
                     Should Be Equal     ${result}       ${volume}
                     Run Keyword and Return If           ${withkeytest}        Test key handling       ${protocol}       ${server}       ${volume}
                     Execute on          datanode        ozone oz -deleteBucket ${protocol}${server}/${volume}/bb1
@@ -80,6 +82,6 @@ Test key handling
                     Execute on          datanode        ls -l NOTICE.txt.1
     ${result} =     Execute on          datanode        ozone oz -infoKey ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
                     Should contain      ${result}       createdOn
-    ${result} =     Execute on          datanode        ozone oz -listKey o3://ozoneManager/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
+    ${result} =     Execute on          datanode        ozone oz -listKey ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
                     Should Be Equal     ${result}       key1
                     Execute on          datanode        ozone oz -deleteKey ${protocol}${server}/${volume}/bb1/key1 -v

http://git-wip-us.apache.org/repos/asf/hadoop/blob/129269f9/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 000d530..8f53049 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -71,6 +71,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -332,7 +333,7 @@ public class TestOzoneShell {
   public void testListVolume() throws Exception {
     LOG.info("Running testListVolume");
     String protocol = clientProtocol.getName().toLowerCase();
-    String commandOutput;
+    String commandOutput, commandError;
     List<VolumeInfo> volumes;
     final int volCount = 20;
     final String user1 = "test-user-a-" + protocol;
@@ -361,8 +362,15 @@ public class TestOzoneShell {
       assertNotNull(vol);
     }
 
+    String[] args = new String[] {"-listVolume", url + "/abcde", "-user",
+        user1, "-length", "100"};
+    assertEquals(1, ToolRunner.run(shell, args));
+    commandError = err.toString();
+    Assert.assertTrue(commandError.contains("Invalid URI:"));
+
+    err.reset();
     // test -length option
-    String[] args = new String[] {"-listVolume", url + "/", "-user",
+    args = new String[] {"-listVolume", url + "/", "-user",
         user1, "-length", "100"};
     assertEquals(0, ToolRunner.run(shell, args));
     commandOutput = out.toString();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/129269f9/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
index 2aec0fc..726f4ca 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
@@ -207,6 +207,7 @@ public class Shell extends Configured implements Tool {
             "For example : ozone oz -listVolume <ozoneURI>" +
             "-user <username> -root or ozone oz " +
             "-listVolume");
+    listVolume.setOptionalArg(true);
     options.addOption(listVolume);
 
     Option updateVolume =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/129269f9/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
index 3749df4..85b7b2b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.web.ozShell.volume;
 
+import com.google.common.base.Strings;
 import org.apache.commons.cli.CommandLine;
 import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneVolume;
@@ -30,6 +31,7 @@ import org.apache.hadoop.ozone.web.utils.JsonUtils;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 
 import java.io.IOException;
+import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -77,7 +79,16 @@ public class ListVolumeHandler extends Handler {
     }
 
     String ozoneURIString = cmd.getOptionValue(Shell.LIST_VOLUME);
-    verifyURI(ozoneURIString);
+    if (Strings.isNullOrEmpty(ozoneURIString)) {
+      ozoneURIString = "/";
+    }
+    URI ozoneURI = verifyURI(ozoneURIString);
+    if (!Strings.isNullOrEmpty(ozoneURI.getPath()) && !ozoneURI.getPath()
+        .equals("/")) {
+      throw new OzoneClientException(
+          "Invalid URI: " + ozoneURI + " . Specified path not used." + ozoneURI
+              .getPath());
+    }
 
     if (cmd.hasOption(Shell.USER)) {
       userName = cmd.getOptionValue(Shell.USER);
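
A minimal sketch, assuming Apache Commons CLI 1.3+ for DefaultParser, of the optional-argument behaviour the Shell change above relies on: once setOptionalArg(true) is set, getOptionValue() returns null when -listVolume is given without a URI, and the handler can fall back to "/" just as ListVolumeHandler now does. The class name and the demo option wiring are illustrative only.

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

public final class OptionalArgDemo {
  public static void main(String[] args) throws Exception {
    Options options = new Options();
    Option listVolume = new Option("listVolume", true, "list volumes");
    // The URI argument may now be omitted on the command line.
    listVolume.setOptionalArg(true);
    options.addOption(listVolume);

    CommandLine cmd = new DefaultParser().parse(options, args);
    String uri = cmd.getOptionValue("listVolume");
    if (uri == null || uri.isEmpty()) {
      // Same fallback as ListVolumeHandler when no URI is supplied.
      uri = "/";
    }
    System.out.println("listing volumes under " + uri);
  }
}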




[06/50] hadoop git commit: HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled reliably fails. Contributed by Weiwei Yang.

Posted by in...@apache.org.
HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled reliably fails. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ccf2db7f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ccf2db7f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ccf2db7f

Branch: refs/heads/HADOOP-15461
Commit: ccf2db7fc2688d262df3309007cb12a4dfedc179
Parents: ba1ab08
Author: Kihwal Lee <ki...@apache.org>
Authored: Thu Jul 19 11:13:37 2018 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Thu Jul 19 11:13:37 2018 -0500

----------------------------------------------------------------------
 .../apache/hadoop/security/TestGroupsCaching.java  | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccf2db7f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
index 46e36b3..bba8152 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
@@ -561,23 +561,28 @@ public class TestGroupsCaching {
 
     // Then expire that entry
     timer.advance(4 * 1000);
+    // Pause the getGroups operation and this will delay the cache refresh
+    FakeGroupMapping.pause();
 
     // Now get the cache entry - it should return immediately
     // with the old value and the cache will not have completed
     // a request to getGroups yet.
     assertEquals(groups.getGroups("me").size(), 2);
     assertEquals(startingRequestCount, FakeGroupMapping.getRequestCount());
+    // Resume the getGroups operation and the cache can get refreshed
+    FakeGroupMapping.resume();
 
-    // Now sleep for a short time and re-check the request count. It should have
-    // increased, but the exception means the cache will not have updated
-    Thread.sleep(50);
+    // Now wait for the refresh done, because of the exception, we expect
+    // a onFailure callback gets called and the counter for failure is 1
+    waitForGroupCounters(groups, 0, 0, 0, 1);
     FakeGroupMapping.setThrowException(false);
     assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
     assertEquals(groups.getGroups("me").size(), 2);
 
-    // Now sleep another short time - the 3rd call to getGroups above
-    // will have kicked off another refresh that updates the cache
-    Thread.sleep(50);
+    // Now the 3rd call to getGroups above will have kicked off
+    // another refresh that updates the cache, since it no longer gives
+    // exception, we now expect the counter for success is 1.
+    waitForGroupCounters(groups, 0, 0, 1, 1);
     assertEquals(startingRequestCount + 2, FakeGroupMapping.getRequestCount());
     assertEquals(groups.getGroups("me").size(), 3);
   }
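
A generic, hedged sketch of the pattern the test fix above adopts: rather than a fixed Thread.sleep(50) that races the background refresh thread, poll a counter until it reaches the expected value or a deadline expires. The helper below is illustrative plain Java and is not the waitForGroupCounters utility used by TestGroupsCaching.

import java.util.concurrent.TimeoutException;
import java.util.function.LongSupplier;

public final class WaitForCounter {

  private WaitForCounter() {
  }

  /**
   * Polls the supplied counter until it equals the expected value, failing
   * with a TimeoutException if the deadline passes first.
   */
  public static void await(LongSupplier counter, long expected,
      long timeoutMillis) throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (counter.getAsLong() != expected) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("counter never reached " + expected
            + " within " + timeoutMillis + " ms");
      }
      // Short poll interval; the loop is bounded by the deadline above.
      Thread.sleep(20);
    }
  }
}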




[28/50] hadoop git commit: HADOOP-15596. Stack trace should not be printed out when running hadoop key commands. Contributed by Kitti Nanasi.

Posted by in...@apache.org.
HADOOP-15596. Stack trace should not be printed out when running hadoop key commands. Contributed by Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/993ec026
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/993ec026
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/993ec026

Branch: refs/heads/HADOOP-15461
Commit: 993ec026d10c7566fd358c022c061bca118c92f0
Parents: 1622a4b
Author: Xiao Chen <xi...@apache.org>
Authored: Thu Jul 19 14:25:38 2018 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri Jul 20 19:46:46 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/crypto/key/KeyShell.java  | 32 +++++++++++++-------
 .../key/kms/LoadBalancingKMSClientProvider.java |  2 +-
 .../org/apache/hadoop/tools/CommandShell.java   |  6 +++-
 3 files changed, 27 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/993ec026/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
index fa84c47..3f8b337 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
@@ -265,8 +265,7 @@ public class KeyShell extends CommandShell {
           }
         }
       } catch (IOException e) {
-        getOut().println("Cannot list keys for KeyProvider: " + provider
-            + ": " + e.toString());
+        getOut().println("Cannot list keys for KeyProvider: " + provider);
         throw e;
       }
     }
@@ -318,12 +317,12 @@ public class KeyShell extends CommandShell {
           printProviderWritten();
         } catch (NoSuchAlgorithmException e) {
           getOut().println("Cannot roll key: " + keyName +
-              " within KeyProvider: " + provider + ". " + e.toString());
+              " within KeyProvider: " + provider + ".");
           throw e;
         }
       } catch (IOException e1) {
         getOut().println("Cannot roll key: " + keyName + " within KeyProvider: "
-            + provider + ". " + e1.toString());
+            + provider + ".");
         throw e1;
       }
     }
@@ -374,8 +373,8 @@ public class KeyShell extends CommandShell {
           }
           return cont;
         } catch (IOException e) {
-          getOut().println(keyName + " will not be deleted.");
-          e.printStackTrace(getErr());
+          getOut().println(keyName + " will not be deleted. "
+              + prettifyException(e));
         }
       }
       return true;
@@ -392,7 +391,7 @@ public class KeyShell extends CommandShell {
           getOut().println(keyName + " has been successfully deleted.");
           printProviderWritten();
         } catch (IOException e) {
-          getOut().println(keyName + " has not been deleted. " + e.toString());
+          getOut().println(keyName + " has not been deleted.");
           throw e;
         }
       }
@@ -463,13 +462,13 @@ public class KeyShell extends CommandShell {
             "with options " + options.toString() + ".");
         printProviderWritten();
       } catch (InvalidParameterException e) {
-        getOut().println(keyName + " has not been created. " + e.toString());
+        getOut().println(keyName + " has not been created.");
         throw e;
       } catch (IOException e) {
-        getOut().println(keyName + " has not been created. " + e.toString());
+        getOut().println(keyName + " has not been created.");
         throw e;
       } catch (NoSuchAlgorithmException e) {
-        getOut().println(keyName + " has not been created. " + e.toString());
+        getOut().println(keyName + " has not been created.");
         throw e;
       }
     }
@@ -520,7 +519,7 @@ public class KeyShell extends CommandShell {
         printProviderWritten();
       } catch (IOException e) {
         getOut().println("Cannot invalidate cache for key: " + keyName +
-            " within KeyProvider: " + provider + ". " + e.toString());
+            " within KeyProvider: " + provider + ".");
         throw e;
       }
     }
@@ -531,6 +530,17 @@ public class KeyShell extends CommandShell {
     }
   }
 
+  @Override
+  protected void printException(Exception e){
+    getErr().println("Executing command failed with " +
+        "the following exception: " + prettifyException(e));
+  }
+
+  private String prettifyException(Exception e) {
+    return e.getClass().getSimpleName() + ": " +
+        e.getLocalizedMessage().split("\n")[0];
+  }
+
   /**
    * main() entry point for the KeyShell.  While strictly speaking the
    * return is void, it will System.exit() with a return code: 0 is for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/993ec026/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index 42cd47d..1ac3fd3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -145,7 +145,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
         // compatible with earlier versions of LBKMSCP
         if (action.action == RetryAction.RetryDecision.FAIL
             && numFailovers >= providers.length - 1) {
-          LOG.warn("Aborting since the Request has failed with all KMS"
+          LOG.error("Aborting since the Request has failed with all KMS"
               + " providers(depending on {}={} setting and numProviders={})"
               + " in the group OR the exception is not recoverable",
               CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/993ec026/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/CommandShell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/CommandShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/CommandShell.java
index 70c8eaf..a53e225 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/CommandShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/CommandShell.java
@@ -76,7 +76,7 @@ public abstract class CommandShell extends Configured implements Tool {
       }
     } catch (Exception e) {
       printShellUsage();
-      e.printStackTrace(err);
+      printException(e);
       return 1;
     }
     return exitCode;
@@ -98,6 +98,10 @@ public abstract class CommandShell extends Configured implements Tool {
     out.flush();
   }
 
+  protected void printException(Exception ex){
+    ex.printStackTrace(err);
+  }
+
   /**
    * Base class for any subcommands of this shell command.
    */
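
A minimal sketch of the formatting rule the KeyShell change above applies: report only the exception's simple class name plus the first line of its localized message instead of a full stack trace. The null-message guard is an addition in this sketch, not part of the committed prettifyException, which assumes a non-null message.

public final class ExceptionFormat {

  private ExceptionFormat() {
  }

  /** Returns "SimpleClassName: first line of the message". */
  public static String prettify(Exception e) {
    String msg = e.getLocalizedMessage();
    String firstLine = (msg == null) ? "" : msg.split("\n")[0];
    return e.getClass().getSimpleName() + ": " + firstLine;
  }

  public static void main(String[] args) {
    // Prints: Executing command failed with the following exception: IOException: disk full
    System.err.println("Executing command failed with the following exception: "
        + prettify(new java.io.IOException("disk full\nlong stack details...")));
  }
}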




[36/50] hadoop git commit: HDFS-13583. RBF: Router admin clrQuota is not synchronized with nameservice. Contributed by Dibyendu Karmakar.

Posted by in...@apache.org.
HDFS-13583. RBF: Router admin clrQuota is not synchronized with nameservice. Contributed by Dibyendu Karmakar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17a87977
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17a87977
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17a87977

Branch: refs/heads/HADOOP-15461
Commit: 17a87977f29ced49724f561a68565217c8cb4e94
Parents: 8688a0c
Author: Yiqun Lin <yq...@apache.org>
Authored: Tue Jul 24 11:15:47 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Tue Jul 24 11:15:47 2018 +0800

----------------------------------------------------------------------
 .../hdfs/server/federation/router/Quota.java    |  9 ++++++-
 .../federation/router/RouterAdminServer.java    |  8 ++++--
 .../federation/router/RouterQuotaManager.java   |  4 +--
 .../router/RouterQuotaUpdateService.java        |  2 +-
 .../federation/router/RouterQuotaUsage.java     |  4 +--
 .../federation/store/records/MountTable.java    |  4 +--
 .../store/records/impl/pb/MountTablePBImpl.java |  4 +--
 .../hdfs/tools/federation/RouterAdmin.java      |  8 +++---
 .../federation/router/TestRouterAdmin.java      |  8 ++++++
 .../federation/router/TestRouterAdminCLI.java   | 16 +++++++++---
 .../federation/router/TestRouterQuota.java      | 26 +++++++++++++++++---
 .../router/TestRouterQuotaManager.java          | 20 +++++++--------
 .../store/records/TestMountTable.java           |  4 +--
 13 files changed, 82 insertions(+), 35 deletions(-)
----------------------------------------------------------------------
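
A brief, hedged sketch of the two sentinels this change swaps, ahead of the hunks below: HdfsConstants.QUOTA_DONT_SET asks setQuota to leave a limit unchanged, while QUOTA_RESET clears it, which is why clrQuota must propagate QUOTA_RESET for the quota to actually be removed on the nameservice. The helper below is illustrative client-side code, not Router code.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public final class ClearQuotaSketch {

  private ClearQuotaSketch() {
  }

  /** Clears both the namespace and space quota on the given directory. */
  public static void clearQuota(DistributedFileSystem dfs, String dir)
      throws IOException {
    // QUOTA_RESET removes the limits; QUOTA_DONT_SET would leave them as-is,
    // which is exactly the bug this commit fixes in the Router's clrQuota path.
    dfs.setQuota(new Path(dir), HdfsConstants.QUOTA_RESET,
        HdfsConstants.QUOTA_RESET);
  }
}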


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
index 75d3e04..846ccd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
@@ -162,6 +162,8 @@ public class Quota {
   private QuotaUsage aggregateQuota(Map<RemoteLocation, QuotaUsage> results) {
     long nsCount = 0;
     long ssCount = 0;
+    long nsQuota = HdfsConstants.QUOTA_RESET;
+    long ssQuota = HdfsConstants.QUOTA_RESET;
     boolean hasQuotaUnSet = false;
 
     for (Map.Entry<RemoteLocation, QuotaUsage> entry : results.entrySet()) {
@@ -173,6 +175,8 @@ public class Quota {
         if (usage.getQuota() == -1 && usage.getSpaceQuota() == -1) {
           hasQuotaUnSet = true;
         }
+        nsQuota = usage.getQuota();
+        ssQuota = usage.getSpaceQuota();
 
         nsCount += usage.getFileAndDirectoryCount();
         ssCount += usage.getSpaceConsumed();
@@ -187,7 +191,10 @@ public class Quota {
     QuotaUsage.Builder builder = new QuotaUsage.Builder()
         .fileAndDirectoryCount(nsCount).spaceConsumed(ssCount);
     if (hasQuotaUnSet) {
-      builder.quota(HdfsConstants.QUOTA_DONT_SET);
+      builder.quota(HdfsConstants.QUOTA_RESET)
+          .spaceQuota(HdfsConstants.QUOTA_RESET);
+    } else {
+      builder.quota(nsQuota).spaceQuota(ssQuota);
     }
 
     return builder.build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
index 8e23eca..114f008 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
@@ -28,6 +28,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolServerSideTranslatorPB;
@@ -253,8 +254,11 @@ public class RouterAdminServer extends AbstractService
 
     if (nsQuota != HdfsConstants.QUOTA_DONT_SET
         || ssQuota != HdfsConstants.QUOTA_DONT_SET) {
-      this.router.getRpcServer().getQuotaModule().setQuota(path, nsQuota,
-          ssQuota, null);
+      HdfsFileStatus ret = this.router.getRpcServer().getFileInfo(path);
+      if (ret != null) {
+        this.router.getRpcServer().getQuotaModule().setQuota(path, nsQuota,
+            ssQuota, null);
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java
index 0df34fc..87a8724 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java
@@ -161,8 +161,8 @@ public class RouterQuotaManager {
       long ssQuota = quota.getSpaceQuota();
 
       // once nsQuota or ssQuota was set, this mount table is quota set
-      if (nsQuota != HdfsConstants.QUOTA_DONT_SET
-          || ssQuota != HdfsConstants.QUOTA_DONT_SET) {
+      if (nsQuota != HdfsConstants.QUOTA_RESET
+          || ssQuota != HdfsConstants.QUOTA_RESET) {
         return true;
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
index 506e2ee..4813b53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
@@ -111,7 +111,7 @@ public class RouterQuotaUpdateService extends PeriodicService {
 
         // If quota is not set in some subclusters under federation path,
         // set quota for this path.
-        if (currentQuotaUsage.getQuota() == HdfsConstants.QUOTA_DONT_SET) {
+        if (currentQuotaUsage.getQuota() == HdfsConstants.QUOTA_RESET) {
           try {
             this.rpcServer.setQuota(src, nsQuota, ssQuota, null);
           } catch (IOException ioe) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java
index eedd80f..18268aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java
@@ -96,14 +96,14 @@ public final class RouterQuotaUsage extends QuotaUsage {
   public String toString() {
     String nsQuota = String.valueOf(getQuota());
     String nsCount = String.valueOf(getFileAndDirectoryCount());
-    if (getQuota() == HdfsConstants.QUOTA_DONT_SET) {
+    if (getQuota() == HdfsConstants.QUOTA_RESET) {
       nsQuota = "-";
       nsCount = "-";
     }
 
     String ssQuota = StringUtils.byteDesc(getSpaceQuota());
     String ssCount = StringUtils.byteDesc(getSpaceConsumed());
-    if (getSpaceQuota() == HdfsConstants.QUOTA_DONT_SET) {
+    if (getSpaceQuota() == HdfsConstants.QUOTA_RESET) {
       ssQuota = "-";
       ssCount = "-";
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
index 49cdf10..0e2e868 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
@@ -153,9 +153,9 @@ public abstract class MountTable extends BaseRecord {
     // Set quota for mount table
     RouterQuotaUsage quota = new RouterQuotaUsage.Builder()
         .fileAndDirectoryCount(RouterQuotaUsage.QUOTA_USAGE_COUNT_DEFAULT)
-        .quota(HdfsConstants.QUOTA_DONT_SET)
+        .quota(HdfsConstants.QUOTA_RESET)
         .spaceConsumed(RouterQuotaUsage.QUOTA_USAGE_COUNT_DEFAULT)
-        .spaceQuota(HdfsConstants.QUOTA_DONT_SET).build();
+        .spaceQuota(HdfsConstants.QUOTA_RESET).build();
     record.setQuota(quota);
 
     // Validate

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java
index e62d0a8..4c7622c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java
@@ -257,9 +257,9 @@ public class MountTablePBImpl extends MountTable implements PBRecord {
   public RouterQuotaUsage getQuota() {
     MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
 
-    long nsQuota = HdfsConstants.QUOTA_DONT_SET;
+    long nsQuota = HdfsConstants.QUOTA_RESET;
     long nsCount = RouterQuotaUsage.QUOTA_USAGE_COUNT_DEFAULT;
-    long ssQuota = HdfsConstants.QUOTA_DONT_SET;
+    long ssQuota = HdfsConstants.QUOTA_RESET;
     long ssCount = RouterQuotaUsage.QUOTA_USAGE_COUNT_DEFAULT;
     if (proto.hasQuota()) {
       QuotaUsageProto quotaProto = proto.getQuota();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index b0a2062..91e1669 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -632,8 +632,8 @@ public class RouterAdmin extends Configured implements Tool {
    * @throws IOException Error clearing the mount point.
    */
   private boolean clrQuota(String mount) throws IOException {
-    return updateQuota(mount, HdfsConstants.QUOTA_DONT_SET,
-        HdfsConstants.QUOTA_DONT_SET);
+    return updateQuota(mount, HdfsConstants.QUOTA_RESET,
+        HdfsConstants.QUOTA_RESET);
   }
 
   /**
@@ -668,8 +668,8 @@ public class RouterAdmin extends Configured implements Tool {
       long nsCount = existingEntry.getQuota().getFileAndDirectoryCount();
       long ssCount = existingEntry.getQuota().getSpaceConsumed();
       // If nsQuota and ssQuota were unset, clear nsQuota and ssQuota.
-      if (nsQuota == HdfsConstants.QUOTA_DONT_SET &&
-          ssQuota == HdfsConstants.QUOTA_DONT_SET) {
+      if (nsQuota == HdfsConstants.QUOTA_RESET &&
+          ssQuota == HdfsConstants.QUOTA_RESET) {
         nsCount = RouterQuotaUsage.QUOTA_USAGE_COUNT_DEFAULT;
         ssCount = RouterQuotaUsage.QUOTA_USAGE_COUNT_DEFAULT;
       } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
index 769bfe7..c834dcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
@@ -64,6 +64,8 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
 
 /**
  * The administrator interface of the {@link Router} implemented by
@@ -101,6 +103,12 @@ public class TestRouterAdmin {
     membership.registerNamenode(
         createNamenodeReport("ns1", "nn1", HAServiceState.ACTIVE));
     stateStore.refreshCaches(true);
+
+    RouterRpcServer spyRpcServer =
+        Mockito.spy(routerContext.getRouter().createRpcServer());
+    Whitebox
+        .setInternalState(routerContext.getRouter(), "rpcServer", spyRpcServer);
+    Mockito.doReturn(null).when(spyRpcServer).getFileInfo(Mockito.anyString());
   }
 
   @AfterClass

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index 5207f00..2da5fb9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
@@ -115,6 +115,14 @@ public class TestRouterAdminCLI {
         Mockito.anyLong(), Mockito.anyLong(), Mockito.any());
     Whitebox.setInternalState(
         routerContext.getRouter().getRpcServer(), "quotaCall", quota);
+
+    RouterRpcServer spyRpcServer =
+        Mockito.spy(routerContext.getRouter().createRpcServer());
+    Whitebox
+        .setInternalState(routerContext.getRouter(), "rpcServer", spyRpcServer);
+
+    Mockito.doReturn(null).when(spyRpcServer).getFileInfo(Mockito.anyString());
+
   }
 
   @AfterClass
@@ -447,10 +455,10 @@ public class TestRouterAdminCLI {
     // verify the default quota set
     assertEquals(RouterQuotaUsage.QUOTA_USAGE_COUNT_DEFAULT,
         quotaUsage.getFileAndDirectoryCount());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quotaUsage.getQuota());
+    assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getQuota());
     assertEquals(RouterQuotaUsage.QUOTA_USAGE_COUNT_DEFAULT,
         quotaUsage.getSpaceConsumed());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quotaUsage.getSpaceQuota());
+    assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getSpaceQuota());
 
     long nsQuota = 50;
     long ssQuota = 100;
@@ -494,8 +502,8 @@ public class TestRouterAdminCLI {
     quotaUsage = mountTable.getQuota();
 
     // verify if quota unset successfully
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quotaUsage.getQuota());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quotaUsage.getSpaceQuota());
+    assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getQuota());
+    assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getSpaceQuota());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
index 431b394..6a29446 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
@@ -460,8 +461,10 @@ public class TestRouterQuota {
   public void testQuotaSynchronization() throws IOException {
     long updateNsQuota = 3;
     long updateSsQuota = 4;
+    FileSystem nnFs = nnContext1.getFileSystem();
+    nnFs.mkdirs(new Path("/testsync"));
     MountTable mountTable = MountTable.newInstance("/quotaSync",
-        Collections.singletonMap("ns0", "/"), Time.now(), Time.now());
+        Collections.singletonMap("ns0", "/testsync"), Time.now(), Time.now());
     mountTable.setQuota(new RouterQuotaUsage.Builder().quota(1)
         .spaceQuota(2).build());
     // Add new mount table
@@ -469,7 +472,7 @@ public class TestRouterQuota {
 
     // ensure the quota is not set as updated value
     QuotaUsage realQuota = nnContext1.getFileSystem()
-        .getQuotaUsage(new Path("/"));
+        .getQuotaUsage(new Path("/testsync"));
     assertNotEquals(updateNsQuota, realQuota.getQuota());
     assertNotEquals(updateSsQuota, realQuota.getSpaceQuota());
 
@@ -489,9 +492,26 @@ public class TestRouterQuota {
 
     // verify if the quota is updated in real path
     realQuota = nnContext1.getFileSystem().getQuotaUsage(
-        new Path("/"));
+        new Path("/testsync"));
     assertEquals(updateNsQuota, realQuota.getQuota());
     assertEquals(updateSsQuota, realQuota.getSpaceQuota());
+
+    // Clear the quota
+    mountTable.setQuota(new RouterQuotaUsage.Builder()
+        .quota(HdfsConstants.QUOTA_RESET)
+        .spaceQuota(HdfsConstants.QUOTA_RESET).build());
+
+    updateRequest = UpdateMountTableEntryRequest
+        .newInstance(mountTable);
+    client = routerContext.getAdminClient();
+    mountTableManager = client.getMountTableManager();
+    mountTableManager.updateMountTableEntry(updateRequest);
+
+    // verify if the quota is updated in real path
+    realQuota = nnContext1.getFileSystem().getQuotaUsage(
+        new Path("/testsync"));
+    assertEquals(HdfsConstants.QUOTA_RESET, realQuota.getQuota());
+    assertEquals(HdfsConstants.QUOTA_RESET, realQuota.getSpaceQuota());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java
index ce3ee17..4a1dd2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java
@@ -81,8 +81,8 @@ public class TestRouterQuotaManager {
 
     // test case2: get quota from an no-quota set path
     RouterQuotaUsage.Builder quota = new RouterQuotaUsage.Builder()
-        .quota(HdfsConstants.QUOTA_DONT_SET)
-        .spaceQuota(HdfsConstants.QUOTA_DONT_SET);
+        .quota(HdfsConstants.QUOTA_RESET)
+        .spaceQuota(HdfsConstants.QUOTA_RESET);
     manager.put("/noQuotaSet", quota.build());
     quotaGet = manager.getQuotaUsage("/noQuotaSet");
     // it should return null
@@ -90,36 +90,36 @@ public class TestRouterQuotaManager {
 
     // test case3: get quota from an quota-set path
     quota.quota(1);
-    quota.spaceQuota(HdfsConstants.QUOTA_DONT_SET);
+    quota.spaceQuota(HdfsConstants.QUOTA_RESET);
     manager.put("/hasQuotaSet", quota.build());
     quotaGet = manager.getQuotaUsage("/hasQuotaSet");
     assertEquals(1, quotaGet.getQuota());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quotaGet.getSpaceQuota());
+    assertEquals(HdfsConstants.QUOTA_RESET, quotaGet.getSpaceQuota());
 
     // test case4: get quota with an non-exist child path
     quotaGet = manager.getQuotaUsage("/hasQuotaSet/file");
     // it will return the nearest ancestor which quota was set
     assertEquals(1, quotaGet.getQuota());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quotaGet.getSpaceQuota());
+    assertEquals(HdfsConstants.QUOTA_RESET, quotaGet.getSpaceQuota());
 
     // test case5: get quota with an child path which its parent
     // wasn't quota set
-    quota.quota(HdfsConstants.QUOTA_DONT_SET);
-    quota.spaceQuota(HdfsConstants.QUOTA_DONT_SET);
+    quota.quota(HdfsConstants.QUOTA_RESET);
+    quota.spaceQuota(HdfsConstants.QUOTA_RESET);
     manager.put("/hasQuotaSet/noQuotaSet", quota.build());
     // here should returns the quota of path /hasQuotaSet
     // (the nearest ancestor which quota was set)
     quotaGet = manager.getQuotaUsage("/hasQuotaSet/noQuotaSet/file");
     assertEquals(1, quotaGet.getQuota());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quotaGet.getSpaceQuota());
+    assertEquals(HdfsConstants.QUOTA_RESET, quotaGet.getSpaceQuota());
 
     // test case6: get quota with an child path which its parent was quota set
     quota.quota(2);
-    quota.spaceQuota(HdfsConstants.QUOTA_DONT_SET);
+    quota.spaceQuota(HdfsConstants.QUOTA_RESET);
     manager.put("/hasQuotaSet/hasQuotaSet", quota.build());
     // here should return the quota of path /hasQuotaSet/hasQuotaSet
     quotaGet = manager.getQuotaUsage("/hasQuotaSet/hasQuotaSet/file");
     assertEquals(2, quotaGet.getQuota());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quotaGet.getSpaceQuota());
+    assertEquals(HdfsConstants.QUOTA_RESET, quotaGet.getSpaceQuota());
   }
 }
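
As a brief illustration of the QUOTA_DONT_SET -> QUOTA_RESET rename exercised above (a sketch only; it assumes RouterQuotaUsage.Builder and HdfsConstants.QUOTA_RESET behave as in these tests):

    // An entry with no quota uses the reset sentinel for both dimensions.
    RouterQuotaUsage noQuota = new RouterQuotaUsage.Builder()
        .quota(HdfsConstants.QUOTA_RESET)        // namespace quota not set
        .spaceQuota(HdfsConstants.QUOTA_RESET)   // space quota not set
        .build();

    // Only values different from QUOTA_RESET count as a real quota.
    boolean hasQuota = noQuota.getQuota() != HdfsConstants.QUOTA_RESET
        || noQuota.getSpaceQuota() != HdfsConstants.QUOTA_RESET;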

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
index 43cf176..05552738 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
@@ -84,9 +84,9 @@ public class TestMountTable {
 
     RouterQuotaUsage quota = record.getQuota();
     assertEquals(0, quota.getFileAndDirectoryCount());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quota.getQuota());
+    assertEquals(HdfsConstants.QUOTA_RESET, quota.getQuota());
     assertEquals(0, quota.getSpaceConsumed());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quota.getSpaceQuota());
+    assertEquals(HdfsConstants.QUOTA_RESET, quota.getSpaceQuota());
 
     MountTable record2 =
         MountTable.newInstance(SRC, DST_MAP, DATE_CREATED, DATE_MOD);




[32/50] hadoop git commit: HDDS-199. Implement ReplicationManager to handle underreplication of closed containers. Contributed by Elek Marton.

Posted by in...@apache.org.
HDDS-199. Implement ReplicationManager to handle underreplication of closed containers. Contributed by Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a9e25ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a9e25ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a9e25ed

Branch: refs/heads/HADOOP-15461
Commit: 3a9e25edf53187f16ec9f9f6075e850b74b3b91f
Parents: 84d7bf1
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Mon Jul 23 10:13:53 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon Jul 23 10:28:33 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   7 +
 .../apache/hadoop/ozone/OzoneConfigKeys.java    |   1 +
 .../common/src/main/resources/ozone-default.xml |  10 +
 .../container/replication/ReplicationQueue.java |  76 ------
 .../replication/ReplicationRequest.java         | 106 --------
 .../container/replication/package-info.java     |  23 --
 .../replication/TestReplicationQueue.java       | 134 ----------
 .../container/replication/package-info.java     |  23 --
 .../hadoop/hdds/server/events/EventWatcher.java |   4 +-
 .../hadoop/hdds/server/events/TypedEvent.java   |   5 +
 .../hdds/server/events/TestEventWatcher.java    |   6 +-
 .../algorithms/ContainerPlacementPolicy.java    |   5 +-
 .../placement/algorithms/SCMCommonPolicy.java   |   8 +-
 .../SCMContainerPlacementCapacity.java          |  16 +-
 .../algorithms/SCMContainerPlacementRandom.java |   7 +-
 .../replication/ReplicationCommandWatcher.java  |  56 +++++
 .../replication/ReplicationManager.java         | 242 +++++++++++++++++++
 .../container/replication/ReplicationQueue.java |  73 ++++++
 .../replication/ReplicationRequest.java         | 107 ++++++++
 .../scm/container/replication/package-info.java |  23 ++
 .../hadoop/hdds/scm/events/SCMEvents.java       |  31 +++
 .../scm/server/StorageContainerManager.java     |  42 +++-
 .../TestSCMContainerPlacementCapacity.java      | 106 ++++++++
 .../TestSCMContainerPlacementRandom.java        |  86 +++++++
 .../replication/TestReplicationManager.java     | 215 ++++++++++++++++
 .../replication/TestReplicationQueue.java       | 134 ++++++++++
 .../scm/container/replication/package-info.java |  23 ++
 .../placement/TestContainerPlacement.java       |   5 +-
 28 files changed, 1192 insertions(+), 382 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 6e940ad..e337d2f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -251,6 +251,13 @@ public final class ScmConfigKeys {
   public static final String OZONE_SCM_CONTAINER_CLOSE_THRESHOLD =
       "ozone.scm.container.close.threshold";
   public static final float OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
+
+  public static final String HDDS_SCM_WATCHER_TIMEOUT =
+      "hdds.scm.watcher.timeout";
+
+  public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT =
+      "10m";
+
   /**
    * Never constructed.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 0273677..92f0c41 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+
 import org.apache.ratis.util.TimeDuration;
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 84a3e0c..6ddf3c6 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1108,4 +1108,14 @@
     </description>
   </property>
 
+  <property>
+    <name>hdds.scm.watcher.timeout</name>
+    <value>10m</value>
+    <tag>OZONE, SCM, MANAGEMENT</tag>
+    <description>
+      Timeout for the watchers of the HDDS SCM CommandWatchers. After this
+      duration the Copy/Delete container commands will be sent again to the
+      datanode unless the datanode confirms the completion.
+    </description>
+  </property>
 </configuration>
\ No newline at end of file
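
For reference, a duration-valued key like the one added above is typically read with Configuration.getTimeDuration; a non-authoritative sketch (the exact SCM call site may differ; imports of OzoneConfiguration, ScmConfigKeys and java.util.concurrent.TimeUnit are assumed):

    OzoneConfiguration conf = new OzoneConfiguration();
    // Values such as "10m" are parsed into the requested unit.
    long watcherTimeoutMs = conf.getTimeDuration(
        ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT,
        ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT_DEFAULT,
        TimeUnit.MILLISECONDS);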

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java
deleted file mode 100644
index e0a2351..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import java.util.List;
-import java.util.PriorityQueue;
-import java.util.Queue;
-
-/**
- * Priority queue to handle under-replicated and over replicated containers
- * in ozone. ReplicationManager will consume these messages and decide
- * accordingly.
- */
-public class ReplicationQueue {
-
-  private final Queue<ReplicationRequest> queue;
-
-  ReplicationQueue() {
-    queue = new PriorityQueue<>();
-  }
-
-  public synchronized boolean add(ReplicationRequest repObj) {
-    if (this.queue.contains(repObj)) {
-      // Remove the earlier message and insert this one
-      this.queue.remove(repObj);
-    }
-    return this.queue.add(repObj);
-  }
-
-  public synchronized boolean remove(ReplicationRequest repObj) {
-    return queue.remove(repObj);
-  }
-
-  /**
-   * Retrieves, but does not remove, the head of this queue,
-   * or returns {@code null} if this queue is empty.
-   *
-   * @return the head of this queue, or {@code null} if this queue is empty
-   */
-  public synchronized ReplicationRequest peek() {
-    return queue.peek();
-  }
-
-  /**
-   * Retrieves and removes the head of this queue,
-   * or returns {@code null} if this queue is empty.
-   *
-   * @return the head of this queue, or {@code null} if this queue is empty
-   */
-  public synchronized ReplicationRequest poll() {
-    return queue.poll();
-  }
-
-  public synchronized boolean removeAll(List<ReplicationRequest> repObjs) {
-    return queue.removeAll(repObjs);
-  }
-
-  public int size() {
-    return queue.size();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java
deleted file mode 100644
index a6ccce1..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import java.io.Serializable;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-
-/**
- * Wrapper class for hdds replication queue. Implements its natural
- * ordering for priority queue.
- */
-public class ReplicationRequest implements Comparable<ReplicationRequest>,
-    Serializable {
-  private final long containerId;
-  private final short replicationCount;
-  private final short expecReplicationCount;
-  private final long timestamp;
-
-  public ReplicationRequest(long containerId, short replicationCount,
-      long timestamp, short expecReplicationCount) {
-    this.containerId = containerId;
-    this.replicationCount = replicationCount;
-    this.timestamp = timestamp;
-    this.expecReplicationCount = expecReplicationCount;
-  }
-
-  /**
-   * Compares this object with the specified object for order.  Returns a
-   * negative integer, zero, or a positive integer as this object is less
-   * than, equal to, or greater than the specified object.
-   * @param o the object to be compared.
-   * @return a negative integer, zero, or a positive integer as this object
-   * is less than, equal to, or greater than the specified object.
-   * @throws NullPointerException if the specified object is null
-   * @throws ClassCastException   if the specified object's type prevents it
-   *                              from being compared to this object.
-   */
-  @Override
-  public int compareTo(ReplicationRequest o) {
-    if (o == null) {
-      return 1;
-    }
-    if (this == o) {
-      return 0;
-    }
-    int retVal = Integer
-        .compare(getReplicationCount() - getExpecReplicationCount(),
-            o.getReplicationCount() - o.getExpecReplicationCount());
-    if (retVal != 0) {
-      return retVal;
-    }
-    return Long.compare(getTimestamp(), o.getTimestamp());
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(91, 1011)
-        .append(getContainerId())
-        .toHashCode();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    ReplicationRequest that = (ReplicationRequest) o;
-    return new EqualsBuilder().append(getContainerId(), that.getContainerId())
-        .isEquals();
-  }
-
-  public long getContainerId() {
-    return containerId;
-  }
-
-  public short getReplicationCount() {
-    return replicationCount;
-  }
-
-  public long getTimestamp() {
-    return timestamp;
-  }
-
-  public short getExpecReplicationCount() {
-    return expecReplicationCount;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
deleted file mode 100644
index 7f335e3..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.replication;
-
-/**
- * Ozone Container replicaton related classes.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
deleted file mode 100644
index 6d74c68..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import java.util.Random;
-import java.util.UUID;
-import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Test class for ReplicationQueue.
- */
-public class TestReplicationQueue {
-
-  private ReplicationQueue replicationQueue;
-  private Random random;
-
-  @Before
-  public void setUp() {
-    replicationQueue = new ReplicationQueue();
-    random = new Random();
-  }
-
-  @Test
-  public void testDuplicateAddOp() {
-    long contId = random.nextLong();
-    String nodeId = UUID.randomUUID().toString();
-    ReplicationRequest obj1, obj2, obj3;
-    long time = Time.monotonicNow();
-    obj1 = new ReplicationRequest(contId, (short) 2, time, (short) 3);
-    obj2 = new ReplicationRequest(contId, (short) 2, time + 1, (short) 3);
-    obj3 = new ReplicationRequest(contId, (short) 1, time+2, (short) 3);
-
-    replicationQueue.add(obj1);
-    replicationQueue.add(obj2);
-    replicationQueue.add(obj3);
-    Assert.assertEquals("Should add only 1 msg as second one is duplicate",
-        1, replicationQueue.size());
-    ReplicationRequest temp = replicationQueue.poll();
-    Assert.assertEquals(temp, obj3);
-  }
-
-  @Test
-  public void testPollOp() {
-    long contId = random.nextLong();
-    String nodeId = UUID.randomUUID().toString();
-    ReplicationRequest msg1, msg2, msg3, msg4, msg5;
-    msg1 = new ReplicationRequest(contId, (short) 1, Time.monotonicNow(),
-        (short) 3);
-    long time = Time.monotonicNow();
-    msg2 = new ReplicationRequest(contId + 1, (short) 4, time, (short) 3);
-    msg3 = new ReplicationRequest(contId + 2, (short) 0, time, (short) 3);
-    msg4 = new ReplicationRequest(contId, (short) 2, time, (short) 3);
-    // Replication message for same container but different nodeId
-    msg5 = new ReplicationRequest(contId + 1, (short) 2, time, (short) 3);
-
-    replicationQueue.add(msg1);
-    replicationQueue.add(msg2);
-    replicationQueue.add(msg3);
-    replicationQueue.add(msg4);
-    replicationQueue.add(msg5);
-    Assert.assertEquals("Should have 3 objects",
-        3, replicationQueue.size());
-
-    // Since Priority queue orders messages according to replication count,
-    // message with lowest replication should be first
-    ReplicationRequest temp;
-    temp = replicationQueue.poll();
-    Assert.assertEquals("Should have 2 objects",
-        2, replicationQueue.size());
-    Assert.assertEquals(temp, msg3);
-
-    temp = replicationQueue.poll();
-    Assert.assertEquals("Should have 1 objects",
-        1, replicationQueue.size());
-    Assert.assertEquals(temp, msg5);
-
-    // Message 2 should be ordered before message 5 as both have same replication
-    // number but message 2 has earlier timestamp.
-    temp = replicationQueue.poll();
-    Assert.assertEquals("Should have 0 objects",
-        replicationQueue.size(), 0);
-    Assert.assertEquals(temp, msg4);
-  }
-
-  @Test
-  public void testRemoveOp() {
-    long contId = random.nextLong();
-    String nodeId = UUID.randomUUID().toString();
-    ReplicationRequest obj1, obj2, obj3;
-    obj1 = new ReplicationRequest(contId, (short) 1, Time.monotonicNow(),
-        (short) 3);
-    obj2 = new ReplicationRequest(contId + 1, (short) 2, Time.monotonicNow(),
-        (short) 3);
-    obj3 = new ReplicationRequest(contId + 2, (short) 3, Time.monotonicNow(),
-        (short) 3);
-
-    replicationQueue.add(obj1);
-    replicationQueue.add(obj2);
-    replicationQueue.add(obj3);
-    Assert.assertEquals("Should have 3 objects",
-        3, replicationQueue.size());
-
-    replicationQueue.remove(obj3);
-    Assert.assertEquals("Should have 2 objects",
-        2, replicationQueue.size());
-
-    replicationQueue.remove(obj2);
-    Assert.assertEquals("Should have 1 objects",
-        1, replicationQueue.size());
-
-    replicationQueue.remove(obj1);
-    Assert.assertEquals("Should have 0 objects",
-        0, replicationQueue.size());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
deleted file mode 100644
index 5b1fd0f..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * SCM Testing and Mocking Utils.
- */
-package org.apache.hadoop.ozone.container.replication;
-// Test classes for Replication functionality.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
index 473c152..38386d4 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
@@ -180,9 +180,9 @@ public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
 
   }
 
-  abstract void onTimeout(EventPublisher publisher, TIMEOUT_PAYLOAD payload);
+  protected abstract void onTimeout(EventPublisher publisher, TIMEOUT_PAYLOAD payload);
 
-  abstract void onFinished(EventPublisher publisher, TIMEOUT_PAYLOAD payload);
+  protected abstract void onFinished(EventPublisher publisher, TIMEOUT_PAYLOAD payload);
 
   public List<TIMEOUT_PAYLOAD> getTimeoutEvents(
       Predicate<? super TIMEOUT_PAYLOAD> predicate) {

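The widened visibility above is what lets watcher implementations live outside this package; a minimal hypothetical subclass sketch (MyStartPayload and MyCompletedPayload are placeholder IdentifiableEventPayload types, not classes from this patch):

    public class MyCommandWatcher
        extends EventWatcher<MyStartPayload, MyCompletedPayload> {

      public MyCommandWatcher(Event<MyStartPayload> startEvent,
          Event<MyCompletedPayload> completionEvent,
          LeaseManager<Long> leaseManager) {
        super(startEvent, completionEvent, leaseManager);
      }

      @Override
      protected void onTimeout(EventPublisher publisher, MyStartPayload payload) {
        // The tracked command did not complete within the lease; re-fire or escalate.
      }

      @Override
      protected void onFinished(EventPublisher publisher, MyStartPayload payload) {
        // Completion was observed before the lease expired; nothing left to do.
      }
    }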
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
index c2159ad..62e2419 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
@@ -48,4 +48,9 @@ public class TypedEvent<T> implements Event<T> {
     return name;
   }
 
+  @Override
+  public String toString() {
+    return "TypedEvent{" + "payloadType=" + payloadType + ", name='" + name
+        + '\'' + '}';
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
index 8f18478..786b7b8 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
@@ -216,12 +216,12 @@ public class TestEventWatcher {
     }
 
     @Override
-    void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) {
+    protected void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) {
       publisher.fireEvent(UNDER_REPLICATED, payload);
     }
 
     @Override
-    void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) {
+    protected void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) {
       //Good job. We did it.
     }
 
@@ -231,8 +231,6 @@ public class TestEventWatcher {
     }
   }
 
-  ;
-
   private static class ReplicationCompletedEvent
       implements IdentifiableEventPayload {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java
index 5d91ac5..3336c8e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java
@@ -31,11 +31,14 @@ public interface ContainerPlacementPolicy {
   /**
    * Given the replication factor and size required, return set of datanodes
    * that satisfy the nodes and size requirement.
+   *
+   * @param excludedNodes - list of nodes to be excluded.
    * @param nodesRequired - number of datanodes required.
    * @param sizeRequired - size required for the container or block.
    * @return list of datanodes chosen.
    * @throws IOException
    */
-  List<DatanodeDetails> chooseDatanodes(int nodesRequired, long sizeRequired)
+  List<DatanodeDetails> chooseDatanodes(List<DatanodeDetails> excludedNodes,
+      int nodesRequired, long sizeRequired)
       throws IOException;
 }
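
An illustrative call of the new signature (a sketch only; the placementPolicy instance and the getExistingReplicas() helper are assumed, and the size is arbitrary):

    // Datanodes that already hold a replica become the exclusion list, so the
    // policy only proposes additional targets.
    List<DatanodeDetails> existingReplicas = getExistingReplicas(); // hypothetical helper
    int missingReplicas = 2;
    long containerSize = 5L * 1024 * 1024 * 1024; // illustrative 5 GB

    List<DatanodeDetails> newTargets = placementPolicy.chooseDatanodes(
        existingReplicas, missingReplicas, containerSize);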

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java
index 0a595d5..ba241dc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java
@@ -95,16 +95,20 @@ public abstract class SCMCommonPolicy implements ContainerPlacementPolicy {
    * 3. if a set of containers are requested, we either meet the required
    * number of nodes or we fail that request.
    *
+   *
+   * @param excludedNodes - datanodes with existing replicas
    * @param nodesRequired - number of datanodes required.
    * @param sizeRequired - size required for the container or block.
    * @return list of datanodes chosen.
    * @throws SCMException SCM exception.
    */
 
-  public List<DatanodeDetails> chooseDatanodes(int nodesRequired, final long
-      sizeRequired) throws SCMException {
+  public List<DatanodeDetails> chooseDatanodes(
+      List<DatanodeDetails> excludedNodes,
+      int nodesRequired, final long sizeRequired) throws SCMException {
     List<DatanodeDetails> healthyNodes =
         nodeManager.getNodes(HddsProtos.NodeState.HEALTHY);
+    healthyNodes.removeAll(excludedNodes);
     String msg;
     if (healthyNodes.size() == 0) {
       msg = "No healthy node found to allocate container.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java
index 85a6b54..8df8f6e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java
@@ -17,17 +17,18 @@
 
 package org.apache.hadoop.hdds.scm.container.placement.algorithms;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.util.List;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+
+import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.List;
-
 /**
  * Container placement policy that randomly choose datanodes with remaining
  * space to satisfy the size constraints.
@@ -83,6 +84,8 @@ public final class SCMContainerPlacementCapacity extends SCMCommonPolicy {
   /**
    * Called by SCM to choose datanodes.
    *
+   *
+   * @param excludedNodes - list of the datanodes to exclude.
    * @param nodesRequired - number of datanodes required.
    * @param sizeRequired - size required for the container or block.
    * @return List of datanodes.
@@ -90,9 +93,10 @@ public final class SCMContainerPlacementCapacity extends SCMCommonPolicy {
    */
   @Override
   public List<DatanodeDetails> chooseDatanodes(
-      final int nodesRequired, final long sizeRequired) throws SCMException {
+      List<DatanodeDetails> excludedNodes, final int nodesRequired,
+      final long sizeRequired) throws SCMException {
     List<DatanodeDetails> healthyNodes =
-        super.chooseDatanodes(nodesRequired, sizeRequired);
+        super.chooseDatanodes(excludedNodes, nodesRequired, sizeRequired);
     if (healthyNodes.size() == nodesRequired) {
       return healthyNodes;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
index 9903c84..76702d5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
@@ -56,6 +56,8 @@ public final class SCMContainerPlacementRandom extends SCMCommonPolicy
   /**
    * Choose datanodes called by the SCM to choose the datanode.
    *
+   *
+   * @param excludedNodes - list of the datanodes to exclude.
    * @param nodesRequired - number of datanodes required.
    * @param sizeRequired - size required for the container or block.
    * @return List of Datanodes.
@@ -63,9 +65,10 @@ public final class SCMContainerPlacementRandom extends SCMCommonPolicy
    */
   @Override
   public List<DatanodeDetails> chooseDatanodes(
-      final int nodesRequired, final long sizeRequired) throws SCMException {
+      List<DatanodeDetails> excludedNodes, final int nodesRequired,
+      final long sizeRequired) throws SCMException {
     List<DatanodeDetails> healthyNodes =
-        super.chooseDatanodes(nodesRequired, sizeRequired);
+        super.chooseDatanodes(excludedNodes, nodesRequired, sizeRequired);
 
     if (healthyNodes.size() == nodesRequired) {
       return healthyNodes;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationCommandWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationCommandWatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationCommandWatcher.java
new file mode 100644
index 0000000..03a81a7
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationCommandWatcher.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager
+    .ReplicationCompleted;
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager
+    .ReplicationRequestToRepeat;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.server.events.Event;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.server.events.EventWatcher;
+import org.apache.hadoop.ozone.lease.LeaseManager;
+
+/**
+ * Command watcher to track the replication commands.
+ */
+public class ReplicationCommandWatcher
+    extends
+    EventWatcher<ReplicationManager.ReplicationRequestToRepeat,
+        ReplicationManager.ReplicationCompleted> {
+
+  public ReplicationCommandWatcher(Event<ReplicationRequestToRepeat> startEvent,
+      Event<ReplicationCompleted> completionEvent,
+      LeaseManager<Long> leaseManager) {
+    super(startEvent, completionEvent, leaseManager);
+  }
+
+  @Override
+  protected void onTimeout(EventPublisher publisher,
+      ReplicationRequestToRepeat payload) {
+    //put back to the original queue
+    publisher.fireEvent(SCMEvents.REPLICATE_CONTAINER,
+        payload.getRequest());
+  }
+
+  @Override
+  protected void onFinished(EventPublisher publisher,
+      ReplicationRequestToRepeat payload) {
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
new file mode 100644
index 0000000..5f78722
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
@@ -0,0 +1,242 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.ThreadFactory;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms
+    .ContainerPlacementPolicy;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload;
+import org.apache.hadoop.ozone.lease.LeaseManager;
+import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents
+    .TRACK_REPLICATE_COMMAND;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Replication Manager manages the replication of the closed container.
+ */
+public class ReplicationManager implements Runnable {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ReplicationManager.class);
+
+  private ReplicationQueue replicationQueue;
+
+  private ContainerPlacementPolicy containerPlacement;
+
+  private EventPublisher eventPublisher;
+
+  private ReplicationCommandWatcher replicationCommandWatcher;
+
+  private boolean running = true;
+
+  private ContainerStateManager containerStateManager;
+
+  public ReplicationManager(ContainerPlacementPolicy containerPlacement,
+      ContainerStateManager containerStateManager, EventQueue eventQueue,
+      LeaseManager<Long> commandWatcherLeaseManager) {
+
+    this.containerPlacement = containerPlacement;
+    this.containerStateManager = containerStateManager;
+    this.eventPublisher = eventQueue;
+
+    this.replicationCommandWatcher =
+        new ReplicationCommandWatcher(TRACK_REPLICATE_COMMAND,
+            SCMEvents.REPLICATION_COMPLETE, commandWatcherLeaseManager);
+
+    this.replicationQueue = new ReplicationQueue();
+
+    eventQueue.addHandler(SCMEvents.REPLICATE_CONTAINER,
+        (replicationRequest, publisher) -> replicationQueue
+            .add(replicationRequest));
+
+    this.replicationCommandWatcher.start(eventQueue);
+
+  }
+
+  public void start() {
+
+    ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true)
+        .setNameFormat("Replication Manager").build();
+
+    threadFactory.newThread(this).start();
+  }
+
+  public void run() {
+
+    while (running) {
+      ReplicationRequest request = null;
+      try {
+        //TODO: add throttling here
+        request = replicationQueue.take();
+
+        ContainerID containerID = new ContainerID(request.getContainerId());
+        ContainerInfo containerInfo =
+            containerStateManager.getContainer(containerID);
+
+        Preconditions.checkNotNull(containerInfo,
+            "No information about the container " + request.getContainerId());
+
+        Preconditions
+            .checkState(containerInfo.getState() == LifeCycleState.CLOSED,
+                "Container should be in closed state");
+
+        //check the current replication
+        List<DatanodeDetails> datanodesWithReplicas =
+            getCurrentReplicas(request);
+
+        ReplicationRequest finalRequest = request;
+
+        int inFlightReplications = replicationCommandWatcher.getTimeoutEvents(
+            e -> e.request.getContainerId() == finalRequest.getContainerId())
+            .size();
+
+        int deficit =
+            request.getExpecReplicationCount() - datanodesWithReplicas.size()
+                - inFlightReplications;
+
+        if (deficit > 0) {
+
+          List<DatanodeDetails> selectedDatanodes = containerPlacement
+              .chooseDatanodes(datanodesWithReplicas, deficit,
+                  containerInfo.getUsedBytes());
+
+          //send the command
+          for (DatanodeDetails datanode : selectedDatanodes) {
+
+            ReplicateContainerCommand replicateCommand =
+                new ReplicateContainerCommand(containerID.getId(),
+                    datanodesWithReplicas);
+
+            eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND,
+                new CommandForDatanode<>(
+                    datanode.getUuid(), replicateCommand));
+
+            ReplicationRequestToRepeat timeoutEvent =
+                new ReplicationRequestToRepeat(replicateCommand.getId(),
+                    request);
+
+            eventPublisher.fireEvent(TRACK_REPLICATE_COMMAND, timeoutEvent);
+
+          }
+
+        } else if (deficit < 0) {
+          //TODO: too many replicas. Not handled yet.
+        }
+
+      } catch (Exception e) {
+        LOG.error("Can't replicate container {}", request, e);
+      }
+    }
+
+  }
+
+  @VisibleForTesting
+  protected List<DatanodeDetails> getCurrentReplicas(ReplicationRequest request)
+      throws IOException {
+    //TODO: replication information is not yet available after HDDS-175,
+    // should be fixed after HDDS-228
+    return new ArrayList<>();
+  }
+
+  @VisibleForTesting
+  public ReplicationQueue getReplicationQueue() {
+    return replicationQueue;
+  }
+
+  public void stop() {
+    running = false;
+  }
+
+  /**
+   * Event for the ReplicationCommandWatcher to repeat the embedded request
+   * in case of timeout.
+   */
+  public static class ReplicationRequestToRepeat
+      implements IdentifiableEventPayload {
+
+    private final long commandId;
+
+    private final ReplicationRequest request;
+
+    public ReplicationRequestToRepeat(long commandId,
+        ReplicationRequest request) {
+      this.commandId = commandId;
+      this.request = request;
+    }
+
+    public ReplicationRequest getRequest() {
+      return request;
+    }
+
+    @Override
+    public long getId() {
+      return commandId;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+      ReplicationRequestToRepeat that = (ReplicationRequestToRepeat) o;
+      return Objects.equals(request, that.request);
+    }
+
+    @Override
+    public int hashCode() {
+
+      return Objects.hash(request);
+    }
+  }
+
+  public static class ReplicationCompleted implements IdentifiableEventPayload {
+
+    private final long uuid;
+
+    public ReplicationCompleted(long uuid) {
+      this.uuid = uuid;
+    }
+
+    @Override
+    public long getId() {
+      return uuid;
+    }
+  }
+}
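
To make the moving parts above concrete, a rough wiring sketch (assumptions: containerPlacementPolicy, containerStateManager, a started LeaseManager<Long> named leaseManager configured with hdds.scm.watcher.timeout, and a container id are already available):

    EventQueue eventQueue = new EventQueue();

    ReplicationManager replicationManager = new ReplicationManager(
        containerPlacementPolicy, containerStateManager, eventQueue, leaseManager);
    replicationManager.start();

    // Under-replication is reported by firing a request; the manager thread
    // takes it from its ReplicationQueue, asks the placement policy for new
    // targets (excluding current replica holders) and sends
    // ReplicateContainerCommands, tracked by the ReplicationCommandWatcher.
    eventQueue.fireEvent(SCMEvents.REPLICATE_CONTAINER,
        new ReplicationRequest(containerId, (short) 2, Time.monotonicNow(),
            (short) 3));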

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationQueue.java
new file mode 100644
index 0000000..4ca67be
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationQueue.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.PriorityBlockingQueue;
+
+/**
+ * Priority queue to handle under-replicated and over replicated containers
+ * in ozone. ReplicationManager will consume these messages and decide
+ * accordingly.
+ */
+public class ReplicationQueue {
+
+  private final BlockingQueue<ReplicationRequest> queue;
+
+  public ReplicationQueue() {
+    queue = new PriorityBlockingQueue<>();
+  }
+
+  public boolean add(ReplicationRequest repObj) {
+    if (this.queue.contains(repObj)) {
+      // Remove the earlier message and insert this one
+      this.queue.remove(repObj);
+    }
+    return this.queue.add(repObj);
+  }
+
+  public boolean remove(ReplicationRequest repObj) {
+    return queue.remove(repObj);
+  }
+
+  /**
+   * Retrieves, but does not remove, the head of this queue,
+   * or returns {@code null} if this queue is empty.
+   *
+   * @return the head of this queue, or {@code null} if this queue is empty
+   */
+  public ReplicationRequest peek() {
+    return queue.peek();
+  }
+
+  /**
+   * Retrieves and removes the head of this queue (blocking queue).
+   */
+  public ReplicationRequest take() throws InterruptedException {
+    return queue.take();
+  }
+
+  public boolean removeAll(List<ReplicationRequest> repObjs) {
+    return queue.removeAll(repObjs);
+  }
+
+  public int size() {
+    return queue.size();
+  }
+}
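
A short usage sketch for the queue above (values are illustrative; Time is org.apache.hadoop.util.Time):

    void replicationQueueExample() throws InterruptedException {
      ReplicationQueue queue = new ReplicationQueue();
      long now = Time.monotonicNow();

      // equals()/hashCode() are based on the container id, so a second request
      // for the same container replaces the earlier one.
      queue.add(new ReplicationRequest(1L, (short) 2, now, (short) 3));
      queue.add(new ReplicationRequest(1L, (short) 1, now + 1, (short) 3));

      // The request furthest below its expected replication count sorts first.
      queue.add(new ReplicationRequest(2L, (short) 0, now, (short) 3));

      ReplicationRequest mostUrgent = queue.take(); // blocks while empty
      assert mostUrgent.getContainerId() == 2L;
    }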

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationRequest.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationRequest.java
new file mode 100644
index 0000000..ef7c546
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationRequest.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import java.io.Serializable;
+
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+
+/**
+ * Wrapper class for the HDDS replication queue. Implements the natural
+ * ordering used by the priority queue.
+ */
+public class ReplicationRequest implements Comparable<ReplicationRequest>,
+    Serializable {
+  private final long containerId;
+  private final short replicationCount;
+  private final short expecReplicationCount;
+  private final long timestamp;
+
+  public ReplicationRequest(long containerId, short replicationCount,
+      long timestamp, short expecReplicationCount) {
+    this.containerId = containerId;
+    this.replicationCount = replicationCount;
+    this.timestamp = timestamp;
+    this.expecReplicationCount = expecReplicationCount;
+  }
+
+  /**
+   * Compares this object with the specified object for order.  Returns a
+   * negative integer, zero, or a positive integer as this object is less
+   * than, equal to, or greater than the specified object.
+   * @param o the object to be compared.
+   * @return a negative integer, zero, or a positive integer as this object
+   * is less than, equal to, or greater than the specified object.
+   * A {@code null} argument is ordered after any non-null request; this
+   * method does not throw {@link NullPointerException} or
+   * {@link ClassCastException}.
+   */
+  @Override
+  public int compareTo(ReplicationRequest o) {
+    if (o == null) {
+      return 1;
+    }
+    if (this == o) {
+      return 0;
+    }
+    int retVal = Integer
+        .compare(getReplicationCount() - getExpecReplicationCount(),
+            o.getReplicationCount() - o.getExpecReplicationCount());
+    if (retVal != 0) {
+      return retVal;
+    }
+    return Long.compare(getTimestamp(), o.getTimestamp());
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(91, 1011)
+        .append(getContainerId())
+        .toHashCode();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    ReplicationRequest that = (ReplicationRequest) o;
+    return new EqualsBuilder().append(getContainerId(), that.getContainerId())
+        .isEquals();
+  }
+
+  public long getContainerId() {
+    return containerId;
+  }
+
+  public short getReplicationCount() {
+    return replicationCount;
+  }
+
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  public short getExpecReplicationCount() {
+    return expecReplicationCount;
+  }
+}
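
A quick sketch of the natural ordering defined above (not part of the patch):
a larger replication deficit sorts first, ties are broken by the earlier
timestamp, and a null argument is ordered last rather than rejected.

    import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;

    public class ReplicationRequestOrderingSketch {
      public static void main(String[] args) {
        // Expected replica count is 3 in all cases.
        ReplicationRequest a = new ReplicationRequest(1L, (short) 1, 1000L, (short) 3);
        ReplicationRequest b = new ReplicationRequest(2L, (short) 2, 1000L, (short) 3);
        ReplicationRequest c = new ReplicationRequest(3L, (short) 2, 2000L, (short) 3);

        System.out.println(a.compareTo(b) < 0);    // true: a is more under-replicated
        System.out.println(b.compareTo(c) < 0);    // true: same deficit, b is older
        System.out.println(b.compareTo(null) > 0); // true: null is ordered last
      }
    }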

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
new file mode 100644
index 0000000..934b01e
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container.replication;
+
+/**
+ * HDDS (Closed) Container replication-related classes.
+ */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
index 46f1588..ad1702b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
@@ -28,6 +28,10 @@ import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .ContainerReportFromDatanode;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .NodeReportFromDatanode;
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager
+    .ReplicationCompleted;
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;
 
 import org.apache.hadoop.hdds.server.events.Event;
 import org.apache.hadoop.hdds.server.events.TypedEvent;
@@ -129,6 +133,33 @@ public final class SCMEvents {
           "DeleteBlockCommandStatus");
 
   /**
+   * This is the command for ReplicationManager to handle under/over
+   * replication. Sent by the ContainerReportHandler after processing the
+   * heartbeat.
+   */
+  public static final TypedEvent<ReplicationRequest> REPLICATE_CONTAINER =
+      new TypedEvent<>(ReplicationRequest.class);
+
+  /**
+   * This event is sent by the ReplicationManager to the
+   * ReplicationCommandWatcher to track the in-progress replication.
+   */
+  public static final TypedEvent<ReplicationManager.ReplicationRequestToRepeat>
+      TRACK_REPLICATE_COMMAND =
+      new TypedEvent<>(ReplicationManager.ReplicationRequestToRepeat.class);
+  /**
+   * This event comes from the Heartbeat dispatcher (in fact from the
+   * datanode) to notify the SCM that the replication is done. This is
+   * received by the replicate command watcher to mark the in-progress task
+   * as finished.
+   * <p>
+   * TODO: Temporary event, should be replaced by a specific Heartbeat
+   * ActionRequired event.
+   */
+  public static final TypedEvent<ReplicationCompleted> REPLICATION_COMPLETE =
+      new TypedEvent<>(ReplicationCompleted.class);
+
+  /**
    * Private Ctor. Never Constructed.
    */
   private SCMEvents() {
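
As a sketch of how the new REPLICATE_CONTAINER event is meant to be used
(mirroring the pattern in TestReplicationManager further below; the sketch
class itself is made up), a handler is registered on the event queue and
requests are published to it:

    import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;
    import org.apache.hadoop.hdds.scm.events.SCMEvents;
    import org.apache.hadoop.hdds.server.events.EventQueue;

    public class ReplicateEventSketch {
      public static void main(String[] args) {
        EventQueue queue = new EventQueue();

        // In the real wiring the ReplicationManager subscribes to this event.
        queue.addHandler(SCMEvents.REPLICATE_CONTAINER,
            (request, publisher) -> System.out.println(
                "replicate container " + request.getContainerId()));

        // Published, for example, by the container report handler.
        queue.fireEvent(SCMEvents.REPLICATE_CONTAINER,
            new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(),
                (short) 3));
        queue.processAll(1000L);
      }
    }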

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index aba6410..f4cd448 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.block.BlockManager;
 import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
 import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
@@ -38,7 +39,12 @@ import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
 import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
 import org.apache.hadoop.hdds.scm.container.Mapping;
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms
+    .ContainerPlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms
+    .SCMContainerPlacementCapacity;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
@@ -61,9 +67,13 @@ import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
 import org.apache.hadoop.ozone.common.StorageInfo;
+import org.apache.hadoop.ozone.lease.LeaseManager;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.StringUtils;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .HDDS_SCM_WATCHER_TIMEOUT_DEFAULT;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -153,6 +163,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
    * Key = DatanodeUuid, value = ContainerStat.
    */
   private Cache<String, ContainerStat> containerReportCache;
+  private final ReplicationManager replicationManager;
+  private final LeaseManager<Long> commandWatcherLeaseManager;
 
   /**
    * Creates a new StorageContainerManager. Configuration will be updated
@@ -207,6 +219,20 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
     eventQueue.addHandler(SCMEvents.CMD_STATUS_REPORT, cmdStatusReportHandler);
 
+    long watcherTimeout =
+        conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT,
+            HDDS_SCM_WATCHER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
+
+    commandWatcherLeaseManager = new LeaseManager<>(watcherTimeout);
+
+    //TODO: support configurable containerPlacement policy
+    ContainerPlacementPolicy containerPlacementPolicy =
+        new SCMContainerPlacementCapacity(scmNodeManager, conf);
+
+    replicationManager = new ReplicationManager(containerPlacementPolicy,
+        scmContainerManager.getStateManager(), eventQueue,
+        commandWatcherLeaseManager);
+
     scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys
         .OZONE_ADMINISTRATORS);
     scmUsername = UserGroupInformation.getCurrentUser().getUserName();
@@ -552,7 +578,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
 
     httpServer.start();
     scmBlockManager.start();
-
+    replicationManager.start();
     setStartTime();
   }
 
@@ -562,6 +588,20 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   public void stop() {
 
     try {
+      LOG.info("Stopping Replication Manager Service.");
+      replicationManager.stop();
+    } catch (Exception ex) {
+      LOG.error("Replication manager service stop failed.", ex);
+    }
+
+    try {
+      LOG.info("Stopping Lease Manager of the command watchers");
+      commandWatcherLeaseManager.shutdown();
+    } catch (Exception ex) {
+      LOG.error("Lease Manager of the command watchers stop failed");
+    }
+
+    try {
       LOG.info("Stopping datanode service RPC server");
       getDatanodeProtocolServer().stop();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
new file mode 100644
index 0000000..5966f2a
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container.placement.algorithms;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+
+import org.junit.Assert;
+import org.junit.Test;
+import static org.mockito.Matchers.anyObject;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.when;
+
+public class TestSCMContainerPlacementCapacity {
+  @Test
+  public void chooseDatanodes() throws SCMException {
+    //given
+    Configuration conf = new OzoneConfiguration();
+
+    List<DatanodeDetails> datanodes = new ArrayList<>();
+    for (int i = 0; i < 7; i++) {
+      datanodes.add(TestUtils.getDatanodeDetails());
+    }
+
+    NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
+    when(mockNodeManager.getNodes(NodeState.HEALTHY))
+        .thenReturn(new ArrayList<>(datanodes));
+
+    when(mockNodeManager.getNodeStat(anyObject()))
+        .thenReturn(new SCMNodeMetric(100L, 0L, 100L));
+    when(mockNodeManager.getNodeStat(datanodes.get(2)))
+        .thenReturn(new SCMNodeMetric(100L, 90L, 10L));
+    when(mockNodeManager.getNodeStat(datanodes.get(3)))
+        .thenReturn(new SCMNodeMetric(100L, 80L, 20L));
+    when(mockNodeManager.getNodeStat(datanodes.get(4)))
+        .thenReturn(new SCMNodeMetric(100L, 70L, 30L));
+
+    SCMContainerPlacementCapacity scmContainerPlacementRandom =
+        new SCMContainerPlacementCapacity(mockNodeManager, conf);
+
+    List<DatanodeDetails> existingNodes = new ArrayList<>();
+    existingNodes.add(datanodes.get(0));
+    existingNodes.add(datanodes.get(1));
+
+    Map<DatanodeDetails, Integer> selectedCount = new HashMap<>();
+    for (DatanodeDetails datanode : datanodes) {
+      selectedCount.put(datanode, 0);
+    }
+
+    for (int i = 0; i < 1000; i++) {
+
+      //when
+      List<DatanodeDetails> datanodeDetails =
+          scmContainerPlacementRandom.chooseDatanodes(existingNodes, 1, 15);
+
+      //then
+      Assert.assertEquals(1, datanodeDetails.size());
+      DatanodeDetails datanode0Details = datanodeDetails.get(0);
+
+      Assert.assertNotEquals(
+          "Datanode 0 should not been selected: excluded by parameter",
+          datanodes.get(0), datanode0Details);
+      Assert.assertNotEquals(
+          "Datanode 1 should not been selected: excluded by parameter",
+          datanodes.get(1), datanode0Details);
+      Assert.assertNotEquals(
+          "Datanode 2 should not been selected: not enough space there",
+          datanodes.get(2), datanode0Details);
+
+      selectedCount
+          .put(datanode0Details, selectedCount.get(datanode0Details) + 1);
+
+    }
+
+    // Datanode 4 has less space. Should be selected fewer times.
+    Assert.assertTrue(selectedCount.get(datanodes.get(3)) > selectedCount
+        .get(datanodes.get(6)));
+    Assert.assertTrue(selectedCount.get(datanodes.get(4)) > selectedCount
+        .get(datanodes.get(6)));
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
new file mode 100644
index 0000000..430c181
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container.placement.algorithms;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+
+import org.junit.Assert;
+import org.junit.Test;
+import static org.mockito.Matchers.anyObject;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.when;
+
+public class TestSCMContainerPlacementRandom {
+
+  @Test
+  public void chooseDatanodes() throws SCMException {
+    //given
+    Configuration conf = new OzoneConfiguration();
+
+    List<DatanodeDetails> datanodes = new ArrayList<>();
+    for (int i = 0; i < 5; i++) {
+      datanodes.add(TestUtils.getDatanodeDetails());
+    }
+
+    NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
+    when(mockNodeManager.getNodes(NodeState.HEALTHY))
+        .thenReturn(new ArrayList<>(datanodes));
+
+    when(mockNodeManager.getNodeStat(anyObject()))
+        .thenReturn(new SCMNodeMetric(100L, 0L, 100L));
+    when(mockNodeManager.getNodeStat(datanodes.get(2)))
+        .thenReturn(new SCMNodeMetric(100L, 90L, 10L));
+
+    SCMContainerPlacementRandom scmContainerPlacementRandom =
+        new SCMContainerPlacementRandom(mockNodeManager, conf);
+
+    List<DatanodeDetails> existingNodes = new ArrayList<>();
+    existingNodes.add(datanodes.get(0));
+    existingNodes.add(datanodes.get(1));
+
+    for (int i = 0; i < 100; i++) {
+      //when
+      List<DatanodeDetails> datanodeDetails =
+          scmContainerPlacementRandom.chooseDatanodes(existingNodes, 1, 15);
+
+      //then
+      Assert.assertEquals(1, datanodeDetails.size());
+      DatanodeDetails datanode0Details = datanodeDetails.get(0);
+
+      Assert.assertNotEquals(
+          "Datanode 0 should not been selected: excluded by parameter",
+          datanodes.get(0), datanode0Details);
+      Assert.assertNotEquals(
+          "Datanode 1 should not been selected: excluded by parameter",
+          datanodes.get(1), datanode0Details);
+      Assert.assertNotEquals(
+          "Datanode 2 should not been selected: not enough space there",
+          datanodes.get(2), datanode0Details);
+
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
new file mode 100644
index 0000000..e3e876b
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Objects;
+import java.util.UUID;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms
+    .ContainerPlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager
+    .ReplicationRequestToRepeat;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.ozone.lease.LeaseManager;
+import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+
+import com.google.common.base.Preconditions;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents
+    .TRACK_REPLICATE_COMMAND;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import static org.mockito.Matchers.anyObject;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test behaviour of the ReplicationManager.
+ */
+public class TestReplicationManager {
+
+  private EventQueue queue;
+
+  private List<ReplicationRequestToRepeat> trackReplicationEvents;
+
+  private List<CommandForDatanode<ReplicateContainerCommandProto>> copyEvents;
+
+  private ContainerStateManager containerStateManager;
+
+  private ContainerPlacementPolicy containerPlacementPolicy;
+  private List<DatanodeDetails> listOfDatanodeDetails;
+
+  @Before
+  public void initReplicationManager() throws IOException {
+
+    listOfDatanodeDetails = TestUtils.getListOfDatanodeDetails(5);
+
+    containerPlacementPolicy =
+        (excludedNodes, nodesRequired, sizeRequired) -> listOfDatanodeDetails
+            .subList(2, 2 + nodesRequired);
+
+    containerStateManager = Mockito.mock(ContainerStateManager.class);
+
+    //container with 2 replicas
+    ContainerInfo containerInfo = new ContainerInfo.Builder()
+        .setState(LifeCycleState.CLOSED)
+        .build();
+
+    when(containerStateManager.getContainer(anyObject()))
+        .thenReturn(containerInfo);
+
+    queue = new EventQueue();
+
+    trackReplicationEvents = new ArrayList<>();
+    queue.addHandler(TRACK_REPLICATE_COMMAND,
+        (event, publisher) -> trackReplicationEvents.add(event));
+
+    copyEvents = new ArrayList<>();
+    queue.addHandler(SCMEvents.DATANODE_COMMAND,
+        (event, publisher) -> copyEvents.add(event));
+
+  }
+
+  @Test
+  public void testEventSending() throws InterruptedException, IOException {
+
+
+    //GIVEN
+
+    LeaseManager<Long> leaseManager = new LeaseManager<>(100000L);
+    try {
+      leaseManager.start();
+
+      ReplicationManager replicationManager =
+          new ReplicationManager(containerPlacementPolicy,
+              containerStateManager,
+              queue, leaseManager) {
+            @Override
+            protected List<DatanodeDetails> getCurrentReplicas(
+                ReplicationRequest request) throws IOException {
+              return listOfDatanodeDetails.subList(0, 2);
+            }
+          };
+      replicationManager.start();
+
+      //WHEN
+
+      queue.fireEvent(SCMEvents.REPLICATE_CONTAINER,
+          new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(),
+              (short) 3));
+
+      Thread.sleep(500L);
+      queue.processAll(1000L);
+
+      //THEN
+
+      Assert.assertEquals(1, trackReplicationEvents.size());
+      Assert.assertEquals(1, copyEvents.size());
+    } finally {
+      if (leaseManager != null) {
+        leaseManager.shutdown();
+      }
+    }
+  }
+
+  @Test
+  public void testCommandWatcher() throws InterruptedException, IOException {
+
+    Logger.getRootLogger().setLevel(Level.DEBUG);
+    LeaseManager<Long> leaseManager = new LeaseManager<>(1000L);
+
+    try {
+      leaseManager.start();
+
+      ReplicationManager replicationManager =
+          new ReplicationManager(containerPlacementPolicy,
+              containerStateManager,
+              queue,
+              leaseManager) {
+            @Override
+            protected List<DatanodeDetails> getCurrentReplicas(
+                ReplicationRequest request) throws IOException {
+              return listOfDatanodeDetails.subList(0, 2);
+            }
+          };
+      replicationManager.start();
+
+      queue.fireEvent(SCMEvents.REPLICATE_CONTAINER,
+          new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(),
+              (short) 3));
+
+      Thread.sleep(500L);
+
+      queue.processAll(1000L);
+
+      Assert.assertEquals(1, trackReplicationEvents.size());
+      Assert.assertEquals(1, copyEvents.size());
+
+      Assert.assertEquals(trackReplicationEvents.get(0).getId(),
+          copyEvents.get(0).getCommand().getId());
+
+      //event is timed out
+      Thread.sleep(1500);
+
+      queue.processAll(1000L);
+
+      //original copy command + retry
+      Assert.assertEquals(2, trackReplicationEvents.size());
+      Assert.assertEquals(2, copyEvents.size());
+
+    } finally {
+      if (leaseManager != null) {
+        leaseManager.shutdown();
+      }
+    }
+  }
+
+  public static Pipeline createPipeline(Iterable<DatanodeDetails> ids)
+      throws IOException {
+    Objects.requireNonNull(ids, "ids == null");
+    final Iterator<DatanodeDetails> i = ids.iterator();
+    Preconditions.checkArgument(i.hasNext());
+    final DatanodeDetails leader = i.next();
+    String pipelineName = "TEST-" + UUID.randomUUID().toString().substring(3);
+    final Pipeline pipeline =
+        new Pipeline(leader.getUuidString(), LifeCycleState.OPEN,
+            ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineName);
+    pipeline.addMember(leader);
+    for (; i.hasNext(); ) {
+      pipeline.addMember(i.next());
+    }
+    return pipeline;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
new file mode 100644
index 0000000..a593718
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import java.util.Random;
+import java.util.UUID;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test class for ReplicationQueue.
+ */
+public class TestReplicationQueue {
+
+  private ReplicationQueue replicationQueue;
+  private Random random;
+
+  @Before
+  public void setUp() {
+    replicationQueue = new ReplicationQueue();
+    random = new Random();
+  }
+
+  @Test
+  public void testDuplicateAddOp() throws InterruptedException {
+    long contId = random.nextLong();
+    String nodeId = UUID.randomUUID().toString();
+    ReplicationRequest obj1, obj2, obj3;
+    long time = Time.monotonicNow();
+    obj1 = new ReplicationRequest(contId, (short) 2, time, (short) 3);
+    obj2 = new ReplicationRequest(contId, (short) 2, time + 1, (short) 3);
+    obj3 = new ReplicationRequest(contId, (short) 1, time + 2, (short) 3);
+
+    replicationQueue.add(obj1);
+    replicationQueue.add(obj2);
+    replicationQueue.add(obj3);
+    Assert.assertEquals("Should add only 1 msg as second one is duplicate",
+        1, replicationQueue.size());
+    ReplicationRequest temp = replicationQueue.take();
+    Assert.assertEquals(temp, obj3);
+  }
+
+  @Test
+  public void testPollOp() throws InterruptedException {
+    long contId = random.nextLong();
+    String nodeId = UUID.randomUUID().toString();
+    ReplicationRequest msg1, msg2, msg3, msg4, msg5;
+    msg1 = new ReplicationRequest(contId, (short) 1, Time.monotonicNow(),
+        (short) 3);
+    long time = Time.monotonicNow();
+    msg2 = new ReplicationRequest(contId + 1, (short) 4, time, (short) 3);
+    msg3 = new ReplicationRequest(contId + 2, (short) 0, time, (short) 3);
+    msg4 = new ReplicationRequest(contId, (short) 2, time, (short) 3);
+    // Replication message for the same container as msg2; it replaces msg2.
+    msg5 = new ReplicationRequest(contId + 1, (short) 2, time, (short) 3);
+
+    replicationQueue.add(msg1);
+    replicationQueue.add(msg2);
+    replicationQueue.add(msg3);
+    replicationQueue.add(msg4);
+    replicationQueue.add(msg5);
+    Assert.assertEquals("Should have 3 objects",
+        3, replicationQueue.size());
+
+    // Since Priority queue orders messages according to replication count,
+    // message with lowest replication should be first
+    ReplicationRequest temp;
+    temp = replicationQueue.take();
+    Assert.assertEquals("Should have 2 objects",
+        2, replicationQueue.size());
+    Assert.assertEquals(temp, msg3);
+
+    temp = replicationQueue.take();
+    Assert.assertEquals("Should have 1 objects",
+        1, replicationQueue.size());
+    Assert.assertEquals(temp, msg5);
+
+    // Only msg4 remains at this point: it replaced msg1 for the same
+    // container when it was added, and msg3 and msg5 have already been taken.
+    temp = replicationQueue.take();
+    Assert.assertEquals("Should have 0 objects",
+        0, replicationQueue.size());
+    Assert.assertEquals(temp, msg4);
+  }
+
+  @Test
+  public void testRemoveOp() {
+    long contId = random.nextLong();
+    String nodeId = UUID.randomUUID().toString();
+    ReplicationRequest obj1, obj2, obj3;
+    obj1 = new ReplicationRequest(contId, (short) 1, Time.monotonicNow(),
+        (short) 3);
+    obj2 = new ReplicationRequest(contId + 1, (short) 2, Time.monotonicNow(),
+        (short) 3);
+    obj3 = new ReplicationRequest(contId + 2, (short) 3, Time.monotonicNow(),
+        (short) 3);
+
+    replicationQueue.add(obj1);
+    replicationQueue.add(obj2);
+    replicationQueue.add(obj3);
+    Assert.assertEquals("Should have 3 objects",
+        3, replicationQueue.size());
+
+    replicationQueue.remove(obj3);
+    Assert.assertEquals("Should have 2 objects",
+        2, replicationQueue.size());
+
+    replicationQueue.remove(obj2);
+    Assert.assertEquals("Should have 1 objects",
+        1, replicationQueue.size());
+
+    replicationQueue.remove(obj1);
+    Assert.assertEquals("Should have 0 objects",
+        0, replicationQueue.size());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
new file mode 100644
index 0000000..1423c99
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Test classes for the container replication functionality.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+// Test classes for Replication functionality.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
index 651b776..802f2ef 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
 
@@ -86,11 +87,11 @@ public class TestContainerPlacement {
     for (int x = 0; x < opsCount; x++) {
       long containerSize = random.nextInt(100) * OzoneConsts.GB;
       List<DatanodeDetails> nodesCapacity =
-          capacityPlacer.chooseDatanodes(nodesRequired, containerSize);
+          capacityPlacer.chooseDatanodes(new ArrayList<>(), nodesRequired, containerSize);
       assertEquals(nodesRequired, nodesCapacity.size());
 
       List<DatanodeDetails> nodesRandom =
-          randomPlacer.chooseDatanodes(nodesRequired, containerSize);
+          randomPlacer.chooseDatanodes(nodesCapacity, nodesRequired, containerSize);
 
       // One fifth of all calls are delete
       if (x % 5 == 0) {
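
The updated call sites above illustrate the new chooseDatanodes signature: the
first argument lists datanodes that already hold a replica, and those nodes are
excluded from the returned targets. A condensed fragment (reusing the
capacityPlacer, nodesCapacity and containerSize variables from the test above;
not part of the patch):

    // Ask the policy for one additional target of the given size; the nodes
    // that already hold a replica are passed in so they cannot be chosen again.
    List<DatanodeDetails> targets =
        capacityPlacer.chooseDatanodes(nodesCapacity, 1, containerSize);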


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[05/50] hadoop git commit: HADOOP-15610. Fixed pylint version for Hadoop docker image. Contributed by Jack Bearden

Posted by in...@apache.org.
HADOOP-15610.  Fixed pylint version for Hadoop docker image.
               Contributed by Jack Bearden


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba1ab08f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba1ab08f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba1ab08f

Branch: refs/heads/HADOOP-15461
Commit: ba1ab08fdae96ad7c9c4f4bf8672abd741b7f758
Parents: c492eac
Author: Eric Yang <ey...@apache.org>
Authored: Wed Jul 18 20:09:43 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Wed Jul 18 20:09:43 2018 -0400

----------------------------------------------------------------------
 dev-support/docker/Dockerfile | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba1ab08f/dev-support/docker/Dockerfile
----------------------------------------------------------------------
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 369c606..a8c5c12 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -154,9 +154,10 @@ RUN apt-get -q update && apt-get -q install -y shellcheck
 RUN apt-get -q update && apt-get -q install -y bats
 
 ####
-# Install pylint (always want latest)
+# Install pylint at fixed version (2.0.0 removed python2 support)
+# https://github.com/PyCQA/pylint/issues/2294
 ####
-RUN pip2 install pylint
+RUN pip2 install pylint==1.9.2
 
 ####
 # Install dateutil.parser


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[37/50] hadoop git commit: HDDS-258. Helper methods to generate NodeReport and ContainerReport for testing. Contributed by Nanda Kumar.

Posted by in...@apache.org.
HDDS-258. Helper methods to generate NodeReport and ContainerReport for testing. Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ced3efe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ced3efe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ced3efe

Branch: refs/heads/HADOOP-15461
Commit: 2ced3efe94eecc3e6076be1f0341bf6a2f2affab
Parents: 17a8797
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Mon Jul 23 21:29:44 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon Jul 23 22:04:20 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdds/scm/node/SCMNodeManager.java    |  27 +-
 .../org/apache/hadoop/hdds/scm/TestUtils.java   | 372 ++++++++++++++-----
 .../command/TestCommandStatusReportHandler.java |   2 +-
 .../hdds/scm/container/MockNodeManager.java     |   4 +-
 .../scm/container/TestContainerMapping.java     |   4 +-
 .../container/closer/TestContainerCloser.java   |   2 +-
 .../TestSCMContainerPlacementCapacity.java      |   2 +-
 .../TestSCMContainerPlacementRandom.java        |   2 +-
 .../replication/TestReplicationManager.java     |   7 +-
 .../hdds/scm/node/TestContainerPlacement.java   |   4 -
 .../hadoop/hdds/scm/node/TestNodeManager.java   |  80 ++--
 .../hdds/scm/node/TestNodeReportHandler.java    |  19 +-
 .../scm/node/TestSCMNodeStorageStatMap.java     |  13 +-
 .../TestSCMDatanodeHeartbeatDispatcher.java     |   4 +-
 .../ozone/container/common/TestEndPoint.java    |  74 ++--
 .../hadoop/ozone/TestMiniOzoneCluster.java      |  20 +-
 .../container/metrics/TestContainerMetrics.java |   2 +-
 .../container/ozoneimpl/TestOzoneContainer.java |   2 +-
 .../container/server/TestContainerServer.java   |   4 +-
 19 files changed, 406 insertions(+), 238 deletions(-)
----------------------------------------------------------------------
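
A sketch of how the new helpers are intended to be used from a test
(illustrative only; the sketch class is made up, the TestUtils methods are the
ones added below):

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto
        .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
    import org.apache.hadoop.hdds.protocol.proto
        .StorageContainerDatanodeProtocolProtos.NodeReportProto;
    import org.apache.hadoop.hdds.scm.TestUtils;

    public class TestUtilsUsageSketch {
      public static void main(String[] args) {
        // A datanode with a random UUID and a random, valid-looking address.
        DatanodeDetails datanode = TestUtils.randomDatanodeDetails();

        // A node report with two random storage reports.
        NodeReportProto nodeReport = TestUtils.getRandomNodeReport(2);

        // A container report carrying ten random containers.
        ContainerReportsProto containerReports =
            TestUtils.getRandomContainerReports(10);

        // These objects would typically be fed to the report handlers under test.
        System.out.println("generated test data for datanode "
            + datanode.getUuidString());
      }
    }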


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 7370b07..fca08bd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -342,7 +342,8 @@ public class SCMNodeManager
   public VersionResponse getVersion(SCMVersionRequestProto versionRequest) {
     return VersionResponse.newBuilder()
         .setVersion(this.version.getVersion())
-        .addValue(OzoneConsts.SCM_ID, this.scmManager.getScmStorage().getScmId())
+        .addValue(OzoneConsts.SCM_ID,
+            this.scmManager.getScmStorage().getScmId())
         .addValue(OzoneConsts.CLUSTER_ID, this.scmManager.getScmStorage()
             .getClusterID())
         .build();
@@ -364,15 +365,11 @@ public class SCMNodeManager
   public RegisteredCommand register(
       DatanodeDetails datanodeDetails, NodeReportProto nodeReport) {
 
-    String hostname = null;
-    String ip = null;
     InetAddress dnAddress = Server.getRemoteIp();
     if (dnAddress != null) {
       // Mostly called inside an RPC, update ip and peer hostname
-      hostname = dnAddress.getHostName();
-      ip = dnAddress.getHostAddress();
-      datanodeDetails.setHostName(hostname);
-      datanodeDetails.setIpAddress(ip);
+      datanodeDetails.setHostName(dnAddress.getHostName());
+      datanodeDetails.setIpAddress(dnAddress.getHostAddress());
     }
     UUID dnId = datanodeDetails.getUuid();
     try {
@@ -390,14 +387,12 @@ public class SCMNodeManager
       LOG.trace("Datanode is already registered. Datanode: {}",
           datanodeDetails.toString());
     }
-    RegisteredCommand.Builder builder =
-        RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success)
-            .setDatanodeUUID(datanodeDetails.getUuidString())
-            .setClusterID(this.clusterID);
-    if (hostname != null && ip != null) {
-      builder.setHostname(hostname).setIpAddress(ip);
-    }
-    return builder.build();
+    return RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success)
+        .setDatanodeUUID(datanodeDetails.getUuidString())
+        .setClusterID(this.clusterID)
+        .setHostname(datanodeDetails.getHostName())
+        .setIpAddress(datanodeDetails.getIpAddress())
+        .build();
   }
 
   /**
@@ -430,7 +425,7 @@ public class SCMNodeManager
    */
   @Override
   public void processNodeReport(UUID dnUuid, NodeReportProto nodeReport) {
-      this.updateNodeStat(dnUuid, nodeReport);
+    this.updateNodeStat(dnUuid, nodeReport);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index 8d7a2c2..c466570 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -18,7 +18,9 @@ package org.apache.hadoop.hdds.scm;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
+    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol
     .proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol
@@ -31,40 +33,181 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 
+import java.io.File;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
-import java.util.Random;
 import java.util.UUID;
+import java.util.concurrent.ThreadLocalRandom;
 
 /**
  * Stateless helper functions to handler scm/datanode connection.
  */
 public final class TestUtils {
 
+  private static ThreadLocalRandom random = ThreadLocalRandom.current();
+
   private TestUtils() {
   }
 
-  public static DatanodeDetails getDatanodeDetails(SCMNodeManager nodeManager) {
+  /**
+   * Creates DatanodeDetails with random UUID.
+   *
+   * @return DatanodeDetails
+   */
+  public static DatanodeDetails randomDatanodeDetails() {
+    return createDatanodeDetails(UUID.randomUUID());
+  }
+
+  /**
+   * Creates DatanodeDetails using the given UUID.
+   *
+   * @param uuid Datanode's UUID
+   *
+   * @return DatanodeDetails
+   */
+  private static DatanodeDetails createDatanodeDetails(UUID uuid) {
+    String ipAddress = random.nextInt(256)
+        + "." + random.nextInt(256)
+        + "." + random.nextInt(256)
+        + "." + random.nextInt(256);
+    return createDatanodeDetails(uuid.toString(), "localhost", ipAddress);
+  }
 
-    return getDatanodeDetails(nodeManager, UUID.randomUUID().toString());
+  /**
+   * Generates DatanodeDetails from RegisteredCommand.
+   *
+   * @param registeredCommand registration response from SCM
+   *
+   * @return DatanodeDetails
+   */
+  public static DatanodeDetails getDatanodeDetails(
+      RegisteredCommand registeredCommand) {
+    return createDatanodeDetails(registeredCommand.getDatanodeUUID(),
+        registeredCommand.getHostName(), registeredCommand.getIpAddress());
+  }
+
+  /**
+   * Creates DatanodeDetails with the given information.
+   *
+   * @param uuid      Datanode's UUID
+   * @param hostname  hostname of Datanode
+   * @param ipAddress ip address of Datanode
+   *
+   * @return DatanodeDetails
+   */
+  private static DatanodeDetails createDatanodeDetails(String uuid,
+      String hostname, String ipAddress) {
+    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.STANDALONE, 0);
+    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.RATIS, 0);
+    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.REST, 0);
+    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
+    builder.setUuid(uuid)
+        .setHostName(hostname)
+        .setIpAddress(ipAddress)
+        .addPort(containerPort)
+        .addPort(ratisPort)
+        .addPort(restPort);
+    return builder.build();
+  }
+
+  /**
+   * Creates a random DatanodeDetails and register it with the given
+   * NodeManager.
+   *
+   * @param nodeManager NodeManager
+   *
+   * @return DatanodeDetails
+   */
+  public static DatanodeDetails createRandomDatanodeAndRegister(
+      SCMNodeManager nodeManager) {
+    return getDatanodeDetails(
+        nodeManager.register(randomDatanodeDetails(), null));
+  }
+
+  /**
+   * Get specified number of DatanodeDetails and register them with node
+   * manager.
+   *
+   * @param nodeManager node manager to register the datanode ids.
+   * @param count       number of DatanodeDetails needed.
+   *
+   * @return list of DatanodeDetails
+   */
+  public static List<DatanodeDetails> getListOfRegisteredDatanodeDetails(
+      SCMNodeManager nodeManager, int count) {
+    ArrayList<DatanodeDetails> datanodes = new ArrayList<>();
+    for (int i = 0; i < count; i++) {
+      datanodes.add(createRandomDatanodeAndRegister(nodeManager));
+    }
+    return datanodes;
+  }
+
+  /**
+   * Generates a random NodeReport.
+   *
+   * @return NodeReportProto
+   */
+  public static NodeReportProto getRandomNodeReport() {
+    return getRandomNodeReport(1);
+  }
+
+  /**
+   * Generates random NodeReport with the given number of storage report in it.
+   *
+   * @param numberOfStorageReport number of storage reports this node report
+   *                              should have
+   * @return NodeReportProto
+   */
+  public static NodeReportProto getRandomNodeReport(int numberOfStorageReport) {
+    UUID nodeId = UUID.randomUUID();
+    return getRandomNodeReport(nodeId, File.separator + nodeId,
+        numberOfStorageReport);
+  }
+
+  /**
+   * Generates random NodeReport for the given nodeId with the given
+   * base path and number of storage report in it.
+   *
+   * @param nodeId                datanode id
+   * @param basePath              base path of storage directory
+   * @param numberOfStorageReport number of storage reports
+   *
+   * @return NodeReportProto
+   */
+  public static NodeReportProto getRandomNodeReport(UUID nodeId,
+      String basePath, int numberOfStorageReport) {
+    List<StorageReportProto> storageReports = new ArrayList<>();
+    for (int i = 0; i < numberOfStorageReport; i++) {
+      storageReports.add(getRandomStorageReport(nodeId,
+          basePath + File.separator + i));
+    }
+    return createNodeReport(storageReports);
   }
 
   /**
-   * Create a new DatanodeDetails with NodeID set to the string.
+   * Creates NodeReport with the given storage reports.
+   *
+   * @param reports one or more storage report
    *
-   * @param uuid - node ID, it is generally UUID.
-   * @return DatanodeID.
+   * @return NodeReportProto
    */
-  public static DatanodeDetails getDatanodeDetails(SCMNodeManager nodeManager,
-      String uuid) {
-    DatanodeDetails datanodeDetails = getDatanodeDetails(uuid);
-    nodeManager.register(datanodeDetails, null);
-    return datanodeDetails;
+  public static NodeReportProto createNodeReport(
+      StorageReportProto... reports) {
+    return createNodeReport(Arrays.asList(reports));
   }
 
   /**
-   * Create Node Report object.
+   * Creates NodeReport with the given storage reports.
+   *
+   * @param reports storage reports to be included in the node report.
+   *
    * @return NodeReportProto
    */
   public static NodeReportProto createNodeReport(
@@ -75,100 +218,163 @@ public final class TestUtils {
   }
 
   /**
-   * Create SCM Storage Report object.
-   * @return list of SCMStorageReport
+   * Generates random storage report.
+   *
+   * @param nodeId datanode id for which the storage report belongs to
+   * @param path   path of the storage
+   *
+   * @return StorageReportProto
    */
-  public static List<StorageReportProto> createStorageReport(long capacity,
-      long used, long remaining, String path, StorageTypeProto type, String id,
-      int count) {
-    List<StorageReportProto> reportList = new ArrayList<>();
-    for (int i = 0; i < count; i++) {
-      Preconditions.checkNotNull(path);
-      Preconditions.checkNotNull(id);
-      StorageReportProto.Builder srb = StorageReportProto.newBuilder();
-      srb.setStorageUuid(id).setStorageLocation(path).setCapacity(capacity)
-          .setScmUsed(used).setRemaining(remaining);
-      StorageTypeProto storageTypeProto =
-          type == null ? StorageTypeProto.DISK : type;
-      srb.setStorageType(storageTypeProto);
-      reportList.add(srb.build());
-    }
-    return reportList;
+  public static StorageReportProto getRandomStorageReport(UUID nodeId,
+      String path) {
+    return createStorageReport(nodeId, path,
+        random.nextInt(1000),
+        random.nextInt(500),
+        random.nextInt(500),
+        StorageTypeProto.DISK);
   }
 
   /**
-   * Create Command Status report object.
-   * @return CommandStatusReportsProto
+   * Creates storage report with the given information.
+   *
+   * @param nodeId    datanode id
+   * @param path      storage dir
+   * @param capacity  storage size
+   * @param used      space used
+   * @param remaining space remaining
+   * @param type      type of storage
+   *
+   * @return StorageReportProto
    */
-  public static CommandStatusReportsProto createCommandStatusReport(
-      List<CommandStatus> reports) {
-    CommandStatusReportsProto.Builder report = CommandStatusReportsProto
-        .newBuilder();
-    report.addAllCmdStatus(reports);
-    return report.build();
+  public static StorageReportProto createStorageReport(UUID nodeId, String path,
+      long capacity, long used, long remaining, StorageTypeProto type) {
+    Preconditions.checkNotNull(nodeId);
+    Preconditions.checkNotNull(path);
+    StorageReportProto.Builder srb = StorageReportProto.newBuilder();
+    srb.setStorageUuid(nodeId.toString())
+        .setStorageLocation(path)
+        .setCapacity(capacity)
+        .setScmUsed(used)
+        .setRemaining(remaining);
+    StorageTypeProto storageTypeProto =
+        type == null ? StorageTypeProto.DISK : type;
+    srb.setStorageType(storageTypeProto);
+    return srb.build();
   }
 
 
   /**
-   * Get specified number of DatanodeDetails and registered them with node
-   * manager.
+   * Generates random container reports.
    *
-   * @param nodeManager - node manager to register the datanode ids.
-   * @param count       - number of DatanodeDetails needed.
-   * @return
+   * @return ContainerReportsProto
    */
-  public static List<DatanodeDetails> getListOfRegisteredDatanodeDetails(
-      SCMNodeManager nodeManager, int count) {
-    ArrayList<DatanodeDetails> datanodes = new ArrayList<>();
-    for (int i = 0; i < count; i++) {
-      datanodes.add(getDatanodeDetails(nodeManager));
+  public static ContainerReportsProto getRandomContainerReports() {
+    return getRandomContainerReports(1);
+  }
+
+  /**
+   * Generates random container report with the given number of containers.
+   *
+   * @param numberOfContainers number of containers to be in container report
+   *
+   * @return ContainerReportsProto
+   */
+  public static ContainerReportsProto getRandomContainerReports(
+      int numberOfContainers) {
+    List<ContainerInfo> containerInfos = new ArrayList<>();
+    for (int i = 0; i < numberOfContainers; i++) {
+      containerInfos.add(getRandomContainerInfo(i));
     }
-    return datanodes;
+    return getContainerReports(containerInfos);
   }
 
   /**
-   * Get a datanode details.
+   * Creates container report with the given ContainerInfo(s).
    *
-   * @return DatanodeDetails
+   * @param containerInfos one or more ContainerInfo
+   *
+   * @return ContainerReportsProto
    */
-  public static DatanodeDetails getDatanodeDetails() {
-    return getDatanodeDetails(UUID.randomUUID().toString());
+  public static ContainerReportsProto getContainerReports(
+      ContainerInfo... containerInfos) {
+    return getContainerReports(Arrays.asList(containerInfos));
   }
 
-  private static DatanodeDetails getDatanodeDetails(String uuid) {
-    Random random = new Random();
-    String ipAddress =
-        random.nextInt(256) + "." + random.nextInt(256) + "." + random
-            .nextInt(256) + "." + random.nextInt(256);
+  /**
+   * Creates container report with the given ContainerInfo(s).
+   *
+   * @param containerInfos list of ContainerInfo
+   *
+   * @return ContainerReportsProto
+   */
+  public static ContainerReportsProto getContainerReports(
+      List<ContainerInfo> containerInfos) {
+    ContainerReportsProto.Builder
+        reportsBuilder = ContainerReportsProto.newBuilder();
+    for (ContainerInfo containerInfo : containerInfos) {
+      reportsBuilder.addReports(containerInfo);
+    }
+    return reportsBuilder.build();
+  }
 
-    String hostName = uuid;
-    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE, 0);
-    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.RATIS, 0);
-    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.REST, 0);
-    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
-    builder.setUuid(uuid)
-        .setHostName("localhost")
-        .setIpAddress(ipAddress)
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort);
-    return builder.build();
+  /**
+   * Generates random ContainerInfo.
+   *
+   * @param containerId container id of the ContainerInfo
+   *
+   * @return ContainerInfo
+   */
+  public static ContainerInfo getRandomContainerInfo(long containerId) {
+    return createContainerInfo(containerId,
+        OzoneConsts.GB * 5,
+        random.nextLong(1000),
+        OzoneConsts.GB * random.nextInt(5),
+        random.nextLong(1000),
+        OzoneConsts.GB * random.nextInt(2),
+        random.nextLong(1000),
+        OzoneConsts.GB * random.nextInt(5));
   }
 
   /**
-   * Get specified number of list of DatanodeDetails.
+   * Creates ContainerInfo with the given details.
    *
-   * @param count - number of datanode IDs needed.
-   * @return
+   * @param containerId id of the container
+   * @param size        size of container
+   * @param keyCount    number of keys
+   * @param bytesUsed   bytes used by the container
+   * @param readCount   number of reads
+   * @param readBytes   bytes read
+   * @param writeCount  number of writes
+   * @param writeBytes  bytes written
+   *
+   * @return ContainerInfo
    */
-  public static List<DatanodeDetails> getListOfDatanodeDetails(int count) {
-    ArrayList<DatanodeDetails> datanodes = new ArrayList<>();
-    for (int i = 0; i < count; i++) {
-      datanodes.add(getDatanodeDetails());
-    }
-    return datanodes;
+  public static ContainerInfo createContainerInfo(
+      long containerId, long size, long keyCount, long bytesUsed,
+      long readCount, long readBytes, long writeCount, long writeBytes) {
+    return ContainerInfo.newBuilder()
+        .setContainerID(containerId)
+        .setSize(size)
+        .setKeyCount(keyCount)
+        .setUsed(bytesUsed)
+        .setReadCount(readCount)
+        .setReadBytes(readBytes)
+        .setWriteCount(writeCount)
+        .setWriteBytes(writeBytes)
+        .build();
   }
+
+  /**
+   * Create Command Status report object.
+   * @return CommandStatusReportsProto
+   */
+  public static CommandStatusReportsProto createCommandStatusReport(
+      List<CommandStatus> reports) {
+    CommandStatusReportsProto.Builder report = CommandStatusReportsProto
+        .newBuilder();
+    report.addAllCmdStatus(reports);
+    return report.build();
+  }
+
+
 }
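
The refactored helpers above are intended to be composed directly in tests; a rough sketch of typical usage follows (illustrative only, not part of the patch: it assumes the imports of the patched TestUtils, and the single-argument createNodeReport call is inferred from the call sites in the later hunks):

  // Sketch: build heartbeat payloads for one randomly generated datanode.
  DatanodeDetails dn = TestUtils.randomDatanodeDetails();
  StorageReportProto storage = TestUtils.createStorageReport(
      dn.getUuid(),                  // storage uuid derived from the datanode uuid
      "/data/hdds/" + dn.getUuid(),  // hypothetical storage directory
      100, 10, 90,                   // capacity, used, remaining
      null);                         // null storage type falls back to DISK
  NodeReportProto nodeReport = TestUtils.createNodeReport(storage);
  ContainerReportsProto containerReports = TestUtils.getRandomContainerReports(10);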

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
index 5e64e57..eca5b87 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
@@ -97,7 +97,7 @@ public class TestCommandStatusReportHandler implements EventPublisher {
       reports) {
     CommandStatusReportsProto report = TestUtils.createCommandStatusReport
         (reports);
-    DatanodeDetails dn = TestUtils.getDatanodeDetails();
+    DatanodeDetails dn = TestUtils.randomDatanodeDetails();
     return new SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode
         (dn, report);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 593b780..088b700 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
+import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -41,7 +42,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.UUID;
 
-import static org.apache.hadoop.hdds.scm.TestUtils.getDatanodeDetails;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
     .HEALTHY;
@@ -80,7 +80,7 @@ public class MockNodeManager implements NodeManager {
     aggregateStat = new SCMNodeStat();
     if (initializeFakeNodes) {
       for (int x = 0; x < nodeCount; x++) {
-        DatanodeDetails dd = getDatanodeDetails();
+        DatanodeDetails dd = TestUtils.randomDatanodeDetails();
         populateNodeMetric(dd, x);
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
index 42ab126..79ac9cf 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
@@ -192,7 +192,7 @@ public class TestContainerMapping {
   @Test
   public void testFullContainerReport() throws IOException {
     ContainerInfo info = createContainer();
-    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
     List<StorageContainerDatanodeProtocolProtos.ContainerInfo> reports =
         new ArrayList<>();
     StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
@@ -226,7 +226,7 @@ public class TestContainerMapping {
   @Test
   public void testContainerCloseWithContainerReport() throws IOException {
     ContainerInfo info = createContainer();
-    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
     List<StorageContainerDatanodeProtocolProtos.ContainerInfo> reports =
         new ArrayList<>();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
index f7863bc..cc25544 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
@@ -219,7 +219,7 @@ public class TestContainerCloser {
         .setWriteBytes(2000000000L)
         .setDeleteTransactionId(0);
     reports.addReports(ciBuilder);
-    mapping.processContainerReports(TestUtils.getDatanodeDetails(),
+    mapping.processContainerReports(TestUtils.randomDatanodeDetails(),
         reports.build());
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
index 5966f2a..fea1e4b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@ -44,7 +44,7 @@ public class TestSCMContainerPlacementCapacity {
 
     List<DatanodeDetails> datanodes = new ArrayList<>();
     for (int i = 0; i < 7; i++) {
-      datanodes.add(TestUtils.getDatanodeDetails());
+      datanodes.add(TestUtils.randomDatanodeDetails());
     }
 
     NodeManager mockNodeManager = Mockito.mock(NodeManager.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
index 430c181..3b4426c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
@@ -43,7 +43,7 @@ public class TestSCMContainerPlacementRandom {
 
     List<DatanodeDetails> datanodes = new ArrayList<>();
     for (int i = 0; i < 5; i++) {
-      datanodes.add(TestUtils.getDatanodeDetails());
+      datanodes.add(TestUtils.randomDatanodeDetails());
     }
 
     NodeManager mockNodeManager = Mockito.mock(NodeManager.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
index e3e876b..99ec59f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -73,7 +73,12 @@ public class TestReplicationManager {
   @Before
   public void initReplicationManager() throws IOException {
 
-    listOfDatanodeDetails = TestUtils.getListOfDatanodeDetails(5);
+    listOfDatanodeDetails = new ArrayList<>();
+    listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
+    listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
+    listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
+    listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
+    listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
 
     containerPlacementPolicy =
         (excludedNodes, nodesRequired, sizeRequired) -> listOfDatanodeDetails

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 48567ee..2fef620 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -142,10 +142,6 @@ public class TestContainerPlacement {
         TestUtils.getListOfRegisteredDatanodeDetails(nodeManager, nodeCount);
     try {
       for (DatanodeDetails datanodeDetails : datanodes) {
-        String id = UUID.randomUUID().toString();
-        String path = testDir.getAbsolutePath() + "/" + id;
-        List<StorageReportProto> reports = TestUtils
-            .createStorageReport(capacity, used, remaining, path, null, id, 1);
         nodeManager.processHeartbeat(datanodeDetails);
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index 5275992..cfa20be 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -154,8 +154,8 @@ public class TestNodeManager {
     try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
       // Send some heartbeats from different nodes.
       for (int x = 0; x < nodeManager.getMinimumChillModeNodes(); x++) {
-        DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
-            nodeManager);
+        DatanodeDetails datanodeDetails = TestUtils
+            .createRandomDatanodeAndRegister(nodeManager);
         nodeManager.processHeartbeat(datanodeDetails);
       }
 
@@ -200,7 +200,8 @@ public class TestNodeManager {
 
       // Need 100 nodes to come out of chill mode, only one node is sending HB.
       nodeManager.setMinimumChillModeNodes(100);
-      nodeManager.processHeartbeat(TestUtils.getDatanodeDetails(nodeManager));
+      nodeManager.processHeartbeat(TestUtils
+          .createRandomDatanodeAndRegister(nodeManager));
       //TODO: wait for heartbeat to be processed
       Thread.sleep(4 * 1000);
       assertFalse("Not enough heartbeat, Node manager should have" +
@@ -223,7 +224,7 @@ public class TestNodeManager {
     try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
       nodeManager.setMinimumChillModeNodes(3);
       DatanodeDetails datanodeDetails = TestUtils
-          .getDatanodeDetails(nodeManager);
+          .createRandomDatanodeAndRegister(nodeManager);
 
       // Send 10 heartbeat from same node, and assert we never leave chill mode.
       for (int x = 0; x < 10; x++) {
@@ -253,7 +254,8 @@ public class TestNodeManager {
     conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
         100, TimeUnit.MILLISECONDS);
     SCMNodeManager nodeManager = createNodeManager(conf);
-    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(nodeManager);
+    DatanodeDetails datanodeDetails = TestUtils
+        .createRandomDatanodeAndRegister(nodeManager);
     nodeManager.close();
 
     // These should never be processed.
@@ -276,14 +278,14 @@ public class TestNodeManager {
     OzoneConfiguration conf = getConf();
     conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
         100, TimeUnit.MILLISECONDS);
-    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
-    String dnId = datanodeDetails.getUuidString();
+    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
+    UUID dnId = datanodeDetails.getUuid();
     String storagePath = testDir.getAbsolutePath() + "/" + dnId;
-    List<StorageReportProto> reports =
-        TestUtils.createStorageReport(100, 10, 90, storagePath, null, dnId, 1);
+    StorageReportProto report =
+        TestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null);
     try (SCMNodeManager nodemanager = createNodeManager(conf)) {
       nodemanager.register(datanodeDetails,
-          TestUtils.createNodeReport(reports));
+          TestUtils.createNodeReport(report));
       List<SCMCommand> command = nodemanager.processHeartbeat(datanodeDetails);
       Assert.assertTrue(nodemanager.getAllNodes().contains(datanodeDetails));
       Assert.assertTrue("On regular HB calls, SCM responses a "
@@ -331,8 +333,8 @@ public class TestNodeManager {
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
 
       for (int x = 0; x < count; x++) {
-        DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
-            nodeManager);
+        DatanodeDetails datanodeDetails = TestUtils
+            .createRandomDatanodeAndRegister(nodeManager);
         nodeManager.processHeartbeat(datanodeDetails);
       }
       //TODO: wait for heartbeat to be processed
@@ -421,7 +423,7 @@ public class TestNodeManager {
       List<DatanodeDetails> nodeList = createNodeSet(nodeManager, nodeCount);
 
 
-      DatanodeDetails staleNode = TestUtils.getDatanodeDetails(nodeManager);
+      DatanodeDetails staleNode = TestUtils.createRandomDatanodeAndRegister(nodeManager);
 
       // Heartbeat once
       nodeManager.processHeartbeat(staleNode);
@@ -560,11 +562,11 @@ public class TestNodeManager {
      */
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
       DatanodeDetails healthyNode =
-          TestUtils.getDatanodeDetails(nodeManager);
+          TestUtils.createRandomDatanodeAndRegister(nodeManager);
       DatanodeDetails staleNode =
-          TestUtils.getDatanodeDetails(nodeManager);
+          TestUtils.createRandomDatanodeAndRegister(nodeManager);
       DatanodeDetails deadNode =
-          TestUtils.getDatanodeDetails(nodeManager);
+          TestUtils.createRandomDatanodeAndRegister(nodeManager);
       nodeManager.processHeartbeat(healthyNode);
       nodeManager.processHeartbeat(staleNode);
       nodeManager.processHeartbeat(deadNode);
@@ -693,8 +695,9 @@ public class TestNodeManager {
       count) {
     List<DatanodeDetails> list = new LinkedList<>();
     for (int x = 0; x < count; x++) {
-      list.add(TestUtils.getDatanodeDetails(nodeManager, UUID.randomUUID()
-          .toString()));
+      DatanodeDetails datanodeDetails = TestUtils
+          .createRandomDatanodeAndRegister(nodeManager);
+      list.add(datanodeDetails);
     }
     return list;
   }
@@ -876,8 +879,8 @@ public class TestNodeManager {
 
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
       nodeManager.setMinimumChillModeNodes(10);
-      DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
-          nodeManager);
+      DatanodeDetails datanodeDetails = TestUtils
+          .createRandomDatanodeAndRegister(nodeManager);
       nodeManager.processHeartbeat(datanodeDetails);
       String status = nodeManager.getChillModeStatus();
       Assert.assertThat(status, containsString("Still in chill " +
@@ -904,7 +907,8 @@ public class TestNodeManager {
 
       // Assert that node manager force enter cannot be overridden by nodes HBs.
       for (int x = 0; x < 20; x++) {
-        DatanodeDetails datanode = TestUtils.getDatanodeDetails(nodeManager);
+        DatanodeDetails datanode = TestUtils
+            .createRandomDatanodeAndRegister(nodeManager);
         nodeManager.processHeartbeat(datanode);
       }
 
@@ -943,14 +947,13 @@ public class TestNodeManager {
 
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
       for (int x = 0; x < nodeCount; x++) {
-        DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
-            nodeManager);
-        String dnId = datanodeDetails.getUuidString();
+        DatanodeDetails datanodeDetails = TestUtils
+            .createRandomDatanodeAndRegister(nodeManager);
+        UUID dnId = datanodeDetails.getUuid();
         long free = capacity - used;
         String storagePath = testDir.getAbsolutePath() + "/" + dnId;
-        List<StorageReportProto> reports = TestUtils
-            .createStorageReport(capacity, used, free, storagePath,
-                null, dnId, 1);
+        StorageReportProto report = TestUtils
+            .createStorageReport(dnId, storagePath, capacity, used, free, null);
         nodeManager.processHeartbeat(datanodeDetails);
       }
       //TODO: wait for heartbeat to be processed
@@ -990,17 +993,17 @@ public class TestNodeManager {
 
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
       DatanodeDetails datanodeDetails =
-          TestUtils.getDatanodeDetails(nodeManager);
+          TestUtils.createRandomDatanodeAndRegister(nodeManager);
       final long capacity = 2000;
       final long usedPerHeartbeat = 100;
-      String dnId = datanodeDetails.getUuidString();
+      UUID dnId = datanodeDetails.getUuid();
       for (int x = 0; x < heartbeatCount; x++) {
         long scmUsed = x * usedPerHeartbeat;
         long remaining = capacity - scmUsed;
         String storagePath = testDir.getAbsolutePath() + "/" + dnId;
-        List<StorageReportProto> reports = TestUtils
-            .createStorageReport(capacity, scmUsed, remaining, storagePath,
-                null, dnId, 1);
+        StorageReportProto report = TestUtils
+            .createStorageReport(dnId, storagePath, capacity, scmUsed,
+                remaining, null);
 
         nodeManager.processHeartbeat(datanodeDetails);
         Thread.sleep(100);
@@ -1106,21 +1109,20 @@ public class TestNodeManager {
     conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
         100, TimeUnit.MILLISECONDS);
 
-    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
-    String dnId = datanodeDetails.getUuidString();
+    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
+    UUID dnId = datanodeDetails.getUuid();
     String storagePath = testDir.getAbsolutePath() + "/" + dnId;
-    List<StorageReportProto> reports =
-        TestUtils.createStorageReport(100, 10, 90,
-            storagePath, null, dnId, 1);
+    StorageReportProto report =
+        TestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null);
 
     EventQueue eq = new EventQueue();
     try (SCMNodeManager nodemanager = createNodeManager(conf)) {
       eq.addHandler(DATANODE_COMMAND, nodemanager);
 
       nodemanager
-          .register(datanodeDetails, TestUtils.createNodeReport(reports));
+          .register(datanodeDetails, TestUtils.createNodeReport(report));
       eq.fireEvent(DATANODE_COMMAND,
-          new CommandForDatanode(datanodeDetails.getUuid(),
+          new CommandForDatanode<>(datanodeDetails.getUuid(),
               new CloseContainerCommand(1L, ReplicationType.STAND_ALONE)));
 
       eq.processAll(1000L);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
index 3cbde4b..e2e89ab 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
@@ -17,7 +17,6 @@
 package org.apache.hadoop.hdds.scm.node;
 
 import java.io.IOException;
-import java.util.List;
 import java.util.UUID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -55,24 +54,22 @@ public class TestNodeReportHandler implements EventPublisher {
 
   @Test
   public void testNodeReport() throws IOException {
-    DatanodeDetails dn = TestUtils.getDatanodeDetails();
-    List<StorageReportProto> reports =
-        TestUtils.createStorageReport(100, 10, 90, storagePath, null,
-            dn.getUuid().toString(), 1);
+    DatanodeDetails dn = TestUtils.randomDatanodeDetails();
+    StorageReportProto storageOne = TestUtils
+        .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null);
 
     nodeReportHandler.onMessage(
-        getNodeReport(dn, reports), this);
+        getNodeReport(dn, storageOne), this);
     SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn);
 
     Assert.assertTrue(nodeMetric.get().getCapacity().get() == 100);
     Assert.assertTrue(nodeMetric.get().getRemaining().get() == 90);
     Assert.assertTrue(nodeMetric.get().getScmUsed().get() == 10);
 
-    reports =
-        TestUtils.createStorageReport(100, 10, 90, storagePath, null,
-            dn.getUuid().toString(), 2);
+    StorageReportProto storageTwo = TestUtils
+        .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null);
     nodeReportHandler.onMessage(
-        getNodeReport(dn, reports), this);
+        getNodeReport(dn, storageOne, storageTwo), this);
     nodeMetric = nodeManager.getNodeStat(dn);
 
     Assert.assertTrue(nodeMetric.get().getCapacity().get() == 200);
@@ -82,7 +79,7 @@ public class TestNodeReportHandler implements EventPublisher {
   }
 
   private NodeReportFromDatanode getNodeReport(DatanodeDetails dn,
-      List<StorageReportProto> reports) {
+      StorageReportProto... reports) {
     NodeReportProto nodeReportProto = TestUtils.createNodeReport(reports);
     return new NodeReportFromDatanode(dn, nodeReportProto);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
index 072dee7..623fc16 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
@@ -139,18 +139,17 @@ public class TestSCMNodeStorageStatMap {
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
     map.insertNewDatanode(key, reportSet);
     Assert.assertTrue(map.isKnownDatanode(key));
-    String storageId = UUID.randomUUID().toString();
+    UUID storageId = UUID.randomUUID();
     String path =
         GenericTestUtils.getRandomizedTempPath().concat("/" + storageId);
     StorageLocationReport report = reportSet.iterator().next();
     long reportCapacity = report.getCapacity();
     long reportScmUsed = report.getScmUsed();
     long reportRemaining = report.getRemaining();
-    List<StorageReportProto> reports = TestUtils
-        .createStorageReport(reportCapacity, reportScmUsed, reportRemaining,
-            path, null, storageId, 1);
+    StorageReportProto storageReport = TestUtils.createStorageReport(storageId,
+        path, reportCapacity, reportScmUsed, reportRemaining, null);
     StorageReportResult result =
-        map.processNodeReport(key, TestUtils.createNodeReport(reports));
+        map.processNodeReport(key, TestUtils.createNodeReport(storageReport));
     Assert.assertEquals(result.getStatus(),
         SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
     StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb =
@@ -162,8 +161,8 @@ public class TestSCMNodeStorageStatMap {
         SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
 
     reportList.add(TestUtils
-        .createStorageReport(reportCapacity, reportCapacity, 0, path, null,
-            UUID.randomUUID().toString(), 1).get(0));
+        .createStorageReport(UUID.randomUUID(), path, reportCapacity,
+            reportCapacity, 0, null));
     result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
     Assert.assertEquals(result.getStatus(),
         SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
index 1b79ebf..6a0b909 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
@@ -75,7 +75,7 @@ public class TestSCMDatanodeHeartbeatDispatcher {
           }
         });
 
-    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
 
     SCMHeartbeatRequestProto heartbeat =
         SCMHeartbeatRequestProto.newBuilder()
@@ -121,7 +121,7 @@ public class TestSCMDatanodeHeartbeatDispatcher {
           }
         });
 
-    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
 
     SCMHeartbeatRequestProto heartbeat =
         SCMHeartbeatRequestProto.newBuilder()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 6619d26..e24e73e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -16,15 +16,13 @@
  */
 package org.apache.hadoop.ozone.container.common;
 
+import java.util.List;
 import java.util.Map;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
 import org.apache.hadoop.hdds.protocol.proto.
@@ -44,8 +42,6 @@ import org.apache.hadoop.hdds.scm.VersionInfo;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
@@ -57,8 +53,6 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerReport;
 import org.apache.hadoop.ozone.container.common.statemachine
     .DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine
@@ -84,10 +78,8 @@ import static org.mockito.Mockito.mock;
 
 import java.io.File;
 import java.net.InetSocketAddress;
-import java.util.List;
 import java.util.UUID;
 
-import static org.apache.hadoop.hdds.scm.TestUtils.getDatanodeDetails;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
 import static org.apache.hadoop.ozone.container.common.ContainerTestUtils
@@ -159,7 +151,7 @@ public class TestEndPoint {
     OzoneConfiguration conf = SCMTestUtils.getConf();
     try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
         serverAddress, 1000)) {
-      OzoneContainer ozoneContainer = new OzoneContainer(getDatanodeDetails(),
+      OzoneContainer ozoneContainer = new OzoneContainer(TestUtils.randomDatanodeDetails(),
           conf);
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
@@ -183,8 +175,8 @@ public class TestEndPoint {
         serverAddress, 1000)) {
       GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
           .captureLogs(VersionEndpointTask.LOG);
-      OzoneContainer ozoneContainer = new OzoneContainer(getDatanodeDetails(),
-          conf);
+      OzoneContainer ozoneContainer = new OzoneContainer(TestUtils
+          .randomDatanodeDetails(), conf);
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
           conf, ozoneContainer);
@@ -235,7 +227,7 @@ public class TestEndPoint {
     try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
         nonExistentServerAddress, 1000)) {
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      OzoneContainer ozoneContainer = new OzoneContainer(getDatanodeDetails(),
+      OzoneContainer ozoneContainer = new OzoneContainer(TestUtils.randomDatanodeDetails(),
           conf);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
           conf, ozoneContainer);
@@ -262,7 +254,7 @@ public class TestEndPoint {
     try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
         serverAddress, (int) rpcTimeout)) {
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      OzoneContainer ozoneContainer = new OzoneContainer(getDatanodeDetails(),
+      OzoneContainer ozoneContainer = new OzoneContainer(TestUtils.randomDatanodeDetails(),
           conf);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
           conf, ozoneContainer);
@@ -280,14 +272,14 @@ public class TestEndPoint {
 
   @Test
   public void testRegister() throws Exception {
-    DatanodeDetails nodeToRegister = getDatanodeDetails();
+    DatanodeDetails nodeToRegister = TestUtils.randomDatanodeDetails();
     try (EndpointStateMachine rpcEndPoint = createEndpoint(
         SCMTestUtils.getConf(), serverAddress, 1000)) {
       SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint()
           .register(nodeToRegister.getProtoBufMessage(), TestUtils
                   .createNodeReport(
-                      getStorageReports(nodeToRegister.getUuidString())),
-              createContainerReport(10, nodeToRegister));
+                      getStorageReports(nodeToRegister.getUuid())),
+              TestUtils.getRandomContainerReports(10));
       Assert.assertNotNull(responseProto);
       Assert.assertEquals(nodeToRegister.getUuidString(),
           responseProto.getDatanodeUUID());
@@ -298,9 +290,9 @@ public class TestEndPoint {
     }
   }
 
-  private List<StorageReportProto> getStorageReports(String id) {
+  private StorageReportProto getStorageReports(UUID id) {
     String storagePath = testDir.getAbsolutePath() + "/" + id;
-    return TestUtils.createStorageReport(100, 10, 90, storagePath, null, id, 1);
+    return TestUtils.createStorageReport(id, storagePath, 100, 10, 90, null);
   }
 
   private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress,
@@ -312,13 +304,13 @@ public class TestEndPoint {
     rpcEndPoint.setState(EndpointStateMachine.EndPointStates.REGISTER);
     OzoneContainer ozoneContainer = mock(OzoneContainer.class);
     when(ozoneContainer.getNodeReport()).thenReturn(TestUtils
-        .createNodeReport(getStorageReports(UUID.randomUUID().toString())));
+        .createNodeReport(getStorageReports(UUID.randomUUID())));
     when(ozoneContainer.getContainerReport()).thenReturn(
-        createContainerReport(10, null));
+        TestUtils.getRandomContainerReports(10));
     RegisterEndpointTask endpointTask =
         new RegisterEndpointTask(rpcEndPoint, conf, ozoneContainer);
     if (!clearDatanodeDetails) {
-      DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+      DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
       endpointTask.setDatanodeDetails(datanodeDetails);
     }
     endpointTask.call();
@@ -371,15 +363,14 @@ public class TestEndPoint {
 
   @Test
   public void testHeartbeat() throws Exception {
-    DatanodeDetails dataNode = getDatanodeDetails();
+    DatanodeDetails dataNode = TestUtils.randomDatanodeDetails();
     try (EndpointStateMachine rpcEndPoint =
              createEndpoint(SCMTestUtils.getConf(),
                  serverAddress, 1000)) {
-      String storageId = UUID.randomUUID().toString();
       SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
           .setDatanodeDetails(dataNode.getProtoBufMessage())
           .setNodeReport(TestUtils.createNodeReport(
-              getStorageReports(storageId)))
+              getStorageReports(UUID.randomUUID())))
           .build();
 
       SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint()
@@ -391,11 +382,10 @@ public class TestEndPoint {
 
   @Test
   public void testHeartbeatWithCommandStatusReport() throws Exception {
-    DatanodeDetails dataNode = getDatanodeDetails();
+    DatanodeDetails dataNode = TestUtils.randomDatanodeDetails();
     try (EndpointStateMachine rpcEndPoint =
         createEndpoint(SCMTestUtils.getConf(),
             serverAddress, 1000)) {
-      String storageId = UUID.randomUUID().toString();
       // Add some scmCommands for heartbeat response
       addScmCommands();
 
@@ -403,7 +393,7 @@ public class TestEndPoint {
       SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
           .setDatanodeDetails(dataNode.getProtoBufMessage())
           .setNodeReport(TestUtils.createNodeReport(
-              getStorageReports(storageId)))
+              getStorageReports(UUID.randomUUID())))
           .build();
 
       SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint()
@@ -482,11 +472,11 @@ public class TestEndPoint {
 
     // Create a datanode state machine for stateConext used by endpoint task
     try (DatanodeStateMachine stateMachine = new DatanodeStateMachine(
-        TestUtils.getDatanodeDetails(), conf);
-        EndpointStateMachine rpcEndPoint =
+        TestUtils.randomDatanodeDetails(), conf);
+         EndpointStateMachine rpcEndPoint =
             createEndpoint(conf, scmAddress, rpcTimeout)) {
       HddsProtos.DatanodeDetailsProto datanodeDetailsProto =
-          getDatanodeDetails().getProtoBufMessage();
+          TestUtils.randomDatanodeDetails().getProtoBufMessage();
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.HEARTBEAT);
 
       final StateContext stateContext =
@@ -530,26 +520,4 @@ public class TestEndPoint {
         lessThanOrEqualTo(rpcTimeout + tolerance));
   }
 
-  private ContainerReportsProto createContainerReport(
-      int count, DatanodeDetails datanodeDetails) {
-    StorageContainerDatanodeProtocolProtos.ContainerReportsProto.Builder
-        reportsBuilder = StorageContainerDatanodeProtocolProtos
-        .ContainerReportsProto.newBuilder();
-    for (int x = 0; x < count; x++) {
-      long containerID = RandomUtils.nextLong();
-      ContainerReport report = new ContainerReport(containerID,
-            DigestUtils.sha256Hex("Simulated"));
-      report.setKeyCount(1000);
-      report.setSize(OzoneConsts.GB * 5);
-      report.setBytesUsed(OzoneConsts.GB * 2);
-      report.setReadCount(100);
-      report.setReadBytes(OzoneConsts.GB * 1);
-      report.setWriteCount(50);
-      report.setWriteBytes(OzoneConsts.GB * 2);
-
-      reportsBuilder.addReports(report.getProtoBufMessage());
-    }
-    return reportsBuilder.build();
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
index 50cdd54..3c9e0c3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
@@ -109,9 +109,9 @@ public class TestMiniOzoneCluster {
   @Test
   public void testDatanodeIDPersistent() throws Exception {
     // Generate IDs for testing
-    DatanodeDetails id1 = TestUtils.getDatanodeDetails();
-    DatanodeDetails id2 = TestUtils.getDatanodeDetails();
-    DatanodeDetails id3 = TestUtils.getDatanodeDetails();
+    DatanodeDetails id1 = TestUtils.randomDatanodeDetails();
+    DatanodeDetails id2 = TestUtils.randomDatanodeDetails();
+    DatanodeDetails id3 = TestUtils.randomDatanodeDetails();
     id1.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 1));
     id2.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 2));
     id3.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 3));
@@ -162,11 +162,11 @@ public class TestMiniOzoneCluster {
         true);
     try (
         DatanodeStateMachine sm1 = new DatanodeStateMachine(
-            TestUtils.getDatanodeDetails(), ozoneConf);
+            TestUtils.randomDatanodeDetails(), ozoneConf);
         DatanodeStateMachine sm2 = new DatanodeStateMachine(
-            TestUtils.getDatanodeDetails(), ozoneConf);
+            TestUtils.randomDatanodeDetails(), ozoneConf);
         DatanodeStateMachine sm3 = new DatanodeStateMachine(
-            TestUtils.getDatanodeDetails(), ozoneConf)
+            TestUtils.randomDatanodeDetails(), ozoneConf)
     ) {
       HashSet<Integer> ports = new HashSet<Integer>();
       assertTrue(ports.add(sm1.getContainer().getContainerServerPort()));
@@ -185,11 +185,11 @@ public class TestMiniOzoneCluster {
     ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
     try (
         DatanodeStateMachine sm1 = new DatanodeStateMachine(
-            TestUtils.getDatanodeDetails(), ozoneConf);
+            TestUtils.randomDatanodeDetails(), ozoneConf);
         DatanodeStateMachine sm2 = new DatanodeStateMachine(
-            TestUtils.getDatanodeDetails(), ozoneConf);
+            TestUtils.randomDatanodeDetails(), ozoneConf);
         DatanodeStateMachine sm3 = new DatanodeStateMachine(
-            TestUtils.getDatanodeDetails(), ozoneConf)
+            TestUtils.randomDatanodeDetails(), ozoneConf)
     ) {
       HashSet<Integer> ports = new HashSet<Integer>();
       assertTrue(ports.add(sm1.getContainer().getContainerServerPort()));
@@ -204,7 +204,7 @@ public class TestMiniOzoneCluster {
   private void createMalformedIDFile(File malformedFile)
       throws IOException{
     malformedFile.delete();
-    DatanodeDetails id = TestUtils.getDatanodeDetails();
+    DatanodeDetails id = TestUtils.randomDatanodeDetails();
     ContainerUtils.writeDatanodeDetailsTo(id, malformedFile);
 
     FileOutputStream out = new FileOutputStream(malformedFile);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index b1c2065..13ed192 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -72,7 +72,7 @@ public class TestContainerMetrics {
       conf.setInt(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,
           interval);
 
-      DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+      DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
       conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, path);
       VolumeSet volumeSet = new VolumeSet(
           datanodeDetails.getUuidString(), conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 18b325b..1522283 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -64,7 +64,7 @@ public class TestOzoneContainer {
               .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
       conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
 
-      container = new OzoneContainer(TestUtils.getDatanodeDetails(),
+      container = new OzoneContainer(TestUtils.randomDatanodeDetails(),
           conf);
       //Setting scmId, as we start manually ozone container.
       container.getDispatcher().setScmId(UUID.randomUUID().toString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index 3605677..bdb26fb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -102,7 +102,7 @@ public class TestContainerServer {
 
   @Test
   public void testClientServer() throws Exception {
-    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
     runTestClientServer(1,
         (pipeline, conf) -> conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
             pipeline.getLeader()
@@ -216,7 +216,7 @@ public class TestContainerServer {
       HddsDispatcher dispatcher = new HddsDispatcher(
           conf, mock(ContainerSet.class), mock(VolumeSet.class));
       dispatcher.init();
-      DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+      DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
       server = new XceiverServer(datanodeDetails, conf, dispatcher);
       client = new XceiverClient(pipeline, conf);
 




[45/50] hadoop git commit: YARN-7133. Clean up lock-try order in fair scheduler. (Szilard Nemeth via Haibo Chen)

Posted by in...@apache.org.
YARN-7133. Clean up lock-try order in fair scheduler. (Szilard Nemeth via Haibo Chen)
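
The cleanup moves the lock acquisition out of the try block, which is the idiom recommended for java.util.concurrent.locks.Lock: if lock() were to fail inside the try, the finally block would still call unlock() on a lock the thread never acquired, typically raising an IllegalMonitorStateException that masks the original failure. A minimal sketch of the before/after shape (illustrative, not copied from FairScheduler):

  // Before: a failed lock() still reaches unlock() in finally.
  try {
    writeLock.lock();
    // ... guarded section ...
  } finally {
    writeLock.unlock();
  }

  // After: acquire first, then enter try, so unlock() only runs once the lock is held.
  writeLock.lock();
  try {
    // ... guarded section ...
  } finally {
    writeLock.unlock();
  }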


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea2c6c8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea2c6c8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea2c6c8c

Branch: refs/heads/HADOOP-15461
Commit: ea2c6c8c9a55813a19b3dbd0d29747d6a7739030
Parents: e673dd1
Author: Haibo Chen <ha...@apache.org>
Authored: Tue Jul 24 12:46:15 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Tue Jul 24 12:46:59 2018 -0700

----------------------------------------------------------------------
 .../scheduler/fair/FairScheduler.java           | 36 ++++++++++----------
 1 file changed, 18 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea2c6c8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index eb9f6af..20d1afe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -467,8 +467,8 @@ public class FairScheduler extends
       return;
     }
 
+    writeLock.lock();
     try {
-      writeLock.lock();
       RMApp rmApp = rmContext.getRMApps().get(applicationId);
       FSLeafQueue queue = assignToQueue(rmApp, queueName, user);
       if (queue == null) {
@@ -550,8 +550,8 @@ public class FairScheduler extends
       ApplicationAttemptId applicationAttemptId,
       boolean transferStateFromPreviousAttempt,
       boolean isAttemptRecovering) {
+    writeLock.lock();
     try {
-      writeLock.lock();
       SchedulerApplication<FSAppAttempt> application = applications.get(
           applicationAttemptId.getApplicationId());
       String user = application.getUser();
@@ -653,8 +653,8 @@ public class FairScheduler extends
   private void removeApplicationAttempt(
       ApplicationAttemptId applicationAttemptId,
       RMAppAttemptState rmAppAttemptFinalState, boolean keepContainers) {
+    writeLock.lock();
     try {
-      writeLock.lock();
       LOG.info("Application " + applicationAttemptId + " is done. finalState="
               + rmAppAttemptFinalState);
       FSAppAttempt attempt = getApplicationAttempt(applicationAttemptId);
@@ -720,8 +720,8 @@ public class FairScheduler extends
   protected void completedContainerInternal(
       RMContainer rmContainer, ContainerStatus containerStatus,
       RMContainerEventType event) {
+    writeLock.lock();
     try {
-      writeLock.lock();
       Container container = rmContainer.getContainer();
 
       // Get the application for the finished container
@@ -768,8 +768,8 @@ public class FairScheduler extends
 
   private void addNode(List<NMContainerStatus> containerReports,
       RMNode node) {
+    writeLock.lock();
     try {
-      writeLock.lock();
       FSSchedulerNode schedulerNode = new FSSchedulerNode(node,
           usePortForNodeName);
       nodeTracker.addNode(schedulerNode);
@@ -790,8 +790,8 @@ public class FairScheduler extends
   }
 
   private void removeNode(RMNode rmNode) {
+    writeLock.lock();
     try {
-      writeLock.lock();
       NodeId nodeId = rmNode.getNodeID();
       FSSchedulerNode node = nodeTracker.getNode(nodeId);
       if (node == null) {
@@ -988,8 +988,8 @@ public class FairScheduler extends
 
   @Override
   protected void nodeUpdate(RMNode nm) {
+    writeLock.lock();
     try {
-      writeLock.lock();
       long start = getClock().getTime();
       super.nodeUpdate(nm);
 
@@ -1089,8 +1089,8 @@ public class FairScheduler extends
 
   @VisibleForTesting
   void attemptScheduling(FSSchedulerNode node) {
+    writeLock.lock();
     try {
-      writeLock.lock();
       if (rmContext.isWorkPreservingRecoveryEnabled() && !rmContext
           .isSchedulerReadyForAllocatingContainers()) {
         return;
@@ -1305,8 +1305,8 @@ public class FairScheduler extends
   private String resolveReservationQueueName(String queueName,
       ApplicationId applicationId, ReservationId reservationID,
       boolean isRecovering) {
+    readLock.lock();
     try {
-      readLock.lock();
       FSQueue queue = queueMgr.getQueue(queueName);
       if ((queue == null) || !allocConf.isReservable(queue.getQueueName())) {
         return queueName;
@@ -1372,8 +1372,8 @@ public class FairScheduler extends
 
   @SuppressWarnings("deprecation")
   private void initScheduler(Configuration conf) throws IOException {
+    writeLock.lock();
     try {
-      writeLock.lock();
       this.conf = new FairSchedulerConfiguration(conf);
       validateConf(this.conf);
       authorizer = YarnAuthorizationProvider.getInstance(conf);
@@ -1464,8 +1464,8 @@ public class FairScheduler extends
   }
 
   private void startSchedulerThreads() {
+    writeLock.lock();
     try {
-      writeLock.lock();
       Preconditions.checkNotNull(allocsLoader, "allocsLoader is null");
       if (continuousSchedulingEnabled) {
         Preconditions.checkNotNull(schedulingThread,
@@ -1499,8 +1499,8 @@ public class FairScheduler extends
   @SuppressWarnings("deprecation")
   @Override
   public void serviceStop() throws Exception {
+    writeLock.lock();
     try {
-      writeLock.lock();
       if (continuousSchedulingEnabled) {
         if (schedulingThread != null) {
           schedulingThread.interrupt();
@@ -1562,8 +1562,8 @@ public class FairScheduler extends
   @Override
   public boolean checkAccess(UserGroupInformation callerUGI,
       QueueACL acl, String queueName) {
+    readLock.lock();
     try {
-      readLock.lock();
       FSQueue queue = getQueueManager().getQueue(queueName);
       if (queue == null) {
         if (LOG.isDebugEnabled()) {
@@ -1691,8 +1691,8 @@ public class FairScheduler extends
   @Override
   public String moveApplication(ApplicationId appId,
       String queueName) throws YarnException {
+    writeLock.lock();
     try {
-      writeLock.lock();
       SchedulerApplication<FSAppAttempt> app = applications.get(appId);
       if (app == null) {
         throw new YarnException("App to be moved " + appId + " not found.");
@@ -1700,8 +1700,8 @@ public class FairScheduler extends
       FSAppAttempt attempt = (FSAppAttempt) app.getCurrentAppAttempt();
       // To serialize with FairScheduler#allocate, synchronize on app attempt
 
+      attempt.getWriteLock().lock();
       try {
-        attempt.getWriteLock().lock();
         FSLeafQueue oldQueue = (FSLeafQueue) app.getQueue();
         // Check if the attempt is already stopped: don't move stopped app
         // attempt. The attempt has already been removed from all queues.
@@ -1737,8 +1737,8 @@ public class FairScheduler extends
   @Override
   public void preValidateMoveApplication(ApplicationId appId, String newQueue)
       throws YarnException {
+    writeLock.lock();
     try {
-      writeLock.lock();
       SchedulerApplication<FSAppAttempt> app = applications.get(appId);
       if (app == null) {
         throw new YarnException("App to be moved " + appId + " not found.");
@@ -1747,8 +1747,8 @@ public class FairScheduler extends
       FSAppAttempt attempt = app.getCurrentAppAttempt();
       // To serialize with FairScheduler#allocate, synchronize on app attempt
 
+      attempt.getWriteLock().lock();
       try {
-        attempt.getWriteLock().lock();
         FSLeafQueue oldQueue = (FSLeafQueue) app.getQueue();
         String destQueueName = handleMoveToPlanQueue(newQueue);
         FSLeafQueue targetQueue = queueMgr.getLeafQueue(destQueueName, false);
@@ -1869,8 +1869,8 @@ public class FairScheduler extends
   @Override
   public void updateNodeResource(RMNode nm,
       ResourceOption resourceOption) {
+    writeLock.lock();
     try {
-      writeLock.lock();
       super.updateNodeResource(nm, resourceOption);
       updateRootQueueMetrics();
       queueMgr.getRootQueue().setSteadyFairShare(getClusterResource());

