Posted to common-commits@hadoop.apache.org by xk...@apache.org on 2018/05/04 19:27:29 UTC

[01/50] [abbrv] hadoop git commit: YARN-8204. Added a flag to disable YARN service upgrade. Contributed by Chandni Singh

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12943 f8ee2123d -> f7f27391e


YARN-8204.  Added a flag to disable YARN service upgrade.
            Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14b47990
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14b47990
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14b47990

Branch: refs/heads/HDFS-12943
Commit: 14b47990af39de71b0a09d995208f45ea3b79c23
Parents: 9ab3f97
Author: Eric Yang <ey...@apache.org>
Authored: Fri Apr 27 12:23:56 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Fri Apr 27 12:24:43 2018 -0400

----------------------------------------------------------------------
 .../yarn/service/client/ServiceClient.java      |  7 +++++
 .../yarn/service/conf/YarnServiceConf.java      |  7 +++++
 .../yarn/service/exceptions/ErrorStrings.java   |  2 ++
 .../yarn/service/TestYarnNativeServices.java    |  1 +
 .../yarn/service/client/TestServiceClient.java  | 28 +++++++++++++++++---
 5 files changed, 41 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
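
For context: the upgrade path is gated by yarn.service.upgrade.enabled, which defaults to false per the YarnServiceConf change below. A minimal sketch of opting in from client code, assuming the standard Hadoop Configuration/YarnConfiguration API; only the property name and its default are taken from this commit, the rest is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    Configuration conf = new YarnConfiguration();
    // Equivalent to setting yarn.service.upgrade.enabled=true in yarn-site.xml.
    conf.setBoolean("yarn.service.upgrade.enabled", true);
    // A ServiceClient initialized with this conf may call initiateUpgrade(service);
    // with the default of false, the call fails with "Service upgrade is disabled."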


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b47990/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 52cd369..8dd5342 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.yarn.service.containerlaunch.ClasspathConstructor;
 import org.apache.hadoop.yarn.service.containerlaunch.JavaCommandLineBuilder;
 import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException;
 import org.apache.hadoop.yarn.service.exceptions.BadConfigException;
+import org.apache.hadoop.yarn.service.exceptions.ErrorStrings;
 import org.apache.hadoop.yarn.service.exceptions.SliderException;
 import org.apache.hadoop.yarn.service.provider.AbstractClientProvider;
 import org.apache.hadoop.yarn.service.provider.ProviderUtils;
@@ -224,6 +225,12 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
 
   public int initiateUpgrade(Service service) throws YarnException,
       IOException {
+    boolean upgradeEnabled = getConfig().getBoolean(
+        YARN_SERVICE_UPGRADE_ENABLED,
+        YARN_SERVICE_UPGRADE_ENABLED_DEFAULT);
+    if (!upgradeEnabled) {
+      throw new YarnException(ErrorStrings.SERVICE_UPGRADE_DISABLED);
+    }
     Service persistedService =
         ServiceApiUtil.loadService(fs, service.getName());
     if (!StringUtils.isEmpty(persistedService.getId())) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b47990/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
index 55a3d70..13ed1aa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
@@ -128,6 +128,13 @@ public class YarnServiceConf {
       YARN_SERVICE_PREFIX + "container-health-threshold.";
 
   /**
+   * Upgrade feature enabled for services.
+   */
+  public static final String YARN_SERVICE_UPGRADE_ENABLED =
+      "yarn.service.upgrade.enabled";
+  public static final boolean YARN_SERVICE_UPGRADE_ENABLED_DEFAULT = false;
+
+  /**
    * The container health threshold percent when explicitly set for a specific
    * component or globally for all components, will schedule a health check
    * monitor to periodically check for the percentage of healthy containers. It

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b47990/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ErrorStrings.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ErrorStrings.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ErrorStrings.java
index 83658c8..6ae124f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ErrorStrings.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ErrorStrings.java
@@ -39,4 +39,6 @@ public interface ErrorStrings {
     "Too many arguments";
   String ERROR_DUPLICATE_ENTRY = "Duplicate entry for ";
 
+  String SERVICE_UPGRADE_DISABLED = "Service upgrade is disabled.";
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b47990/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
index ab7cb62..5b608e3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
@@ -375,6 +375,7 @@ public class TestYarnNativeServices extends ServiceTestUtils {
   @Test(timeout = 200000)
   public void testUpgrade() throws Exception {
     setupInternal(NUM_NMS);
+    getConf().setBoolean(YARN_SERVICE_UPGRADE_ENABLED, true);
     ServiceClient client = createClient(getConf());
 
     Service service = createExampleApplication();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b47990/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java
index 3e3280b..d3664ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java
@@ -39,6 +39,8 @@ import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.api.records.Container;
 import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.api.records.ServiceState;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
+import org.apache.hadoop.yarn.service.exceptions.ErrorStrings;
 import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
 import org.junit.Assert;
 import org.junit.Rule;
@@ -67,9 +69,26 @@ public class TestServiceClient {
       new ServiceTestUtils.ServiceFSWatcher();
 
   @Test
+  public void testUpgradeDisabledByDefault() throws Exception {
+    Service service = createService();
+    ServiceClient client = MockServiceClient.create(rule, service, false);
+
+    //upgrade the service
+    service.setVersion("v2");
+    try {
+      client.initiateUpgrade(service);
+    } catch (YarnException ex) {
+      Assert.assertEquals(ErrorStrings.SERVICE_UPGRADE_DISABLED,
+          ex.getMessage());
+      return;
+    }
+    Assert.fail();
+  }
+
+  @Test
   public void testActionServiceUpgrade() throws Exception {
     Service service = createService();
-    ServiceClient client = MockServiceClient.create(rule, service);
+    ServiceClient client = MockServiceClient.create(rule, service, true);
 
     //upgrade the service
     service.setVersion("v2");
@@ -85,7 +104,7 @@ public class TestServiceClient {
   @Test
   public void testActionCompInstanceUpgrade() throws Exception {
     Service service = createService();
-    MockServiceClient client = MockServiceClient.create(rule, service);
+    MockServiceClient client = MockServiceClient.create(rule, service, true);
 
     //upgrade the service
     service.setVersion("v2");
@@ -127,7 +146,7 @@ public class TestServiceClient {
     }
 
     static MockServiceClient create(ServiceTestUtils.ServiceFSWatcher rule,
-        Service service)
+        Service service, boolean enableUpgrade)
         throws IOException, YarnException {
       MockServiceClient client = new MockServiceClient();
 
@@ -163,7 +182,8 @@ public class TestServiceClient {
       client.setFileSystem(rule.getFs());
       client.setYarnClient(yarnClient);
       client.service = service;
-
+      rule.getConf().setBoolean(YarnServiceConf.YARN_SERVICE_UPGRADE_ENABLED,
+          enableUpgrade);
       client.init(rule.getConf());
       client.start();
       client.actionCreate(service);




[45/50] [abbrv] hadoop git commit: YARN-8079. Support static and archive unmodified local resources in service AM. Contributed by Suma Shivaprasad

Posted by xk...@apache.org.
YARN-8079. Support static and archive unmodified local resources in service AM. Contributed by Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6795f807
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6795f807
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6795f807

Branch: refs/heads/HDFS-12943
Commit: 6795f8072ffbe6138857e77d51af173f33e4e5c1
Parents: 502914c
Author: Billie Rinaldi <bi...@apache.org>
Authored: Fri May 4 09:27:07 2018 -0700
Committer: Billie Rinaldi <bi...@apache.org>
Committed: Fri May 4 09:27:07 2018 -0700

----------------------------------------------------------------------
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |   2 +
 .../yarn/service/api/records/ConfigFile.java    |   3 +-
 .../yarn/service/conf/YarnServiceConstants.java |   1 +
 .../provider/AbstractClientProvider.java        |  23 ++-
 .../provider/AbstractProviderService.java       |   4 +
 .../yarn/service/provider/ProviderUtils.java    |  91 ++++++++--
 .../service/provider/TestProviderUtils.java     | 164 +++++++++++++++++++
 .../providers/TestAbstractClientProvider.java   |  44 +++++
 .../markdown/yarn-service/YarnServiceAPI.md     |   4 +-
 9 files changed, 321 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
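
For orientation: the new STATIC and ARCHIVE types let a service localize files to containers unmodified. A hedged sketch of declaring such files with the ConfigFile builder, mirroring how the new TestProviderUtils below constructs them (the HDFS paths and file names are placeholders):

    import org.apache.hadoop.yarn.service.api.records.ConfigFile;

    // An archive that is unpacked during localization and exposed under the
    // container's resources/ directory as resources/destFile1.
    ConfigFile archive = new ConfigFile()
        .srcFile("hdfs://default/sourceFile1")   // must already exist on the remote FS
        .destFile("destFile1")
        .type(ConfigFile.TypeEnum.ARCHIVE);

    // A plain file localized as-is; with no dest_file it keeps its source
    // name and lands at resources/sourceFile2.
    ConfigFile staticFile = new ConfigFile()
        .srcFile("hdfs://default/sourceFile2")
        .type(ConfigFile.TypeEnum.STATIC);

Both entries go into a component's list of configuration files; per the AbstractClientProvider change below, validation rejects STATIC/ARCHIVE entries that set 'properties', omit the source file, or point at a directory.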


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index 8c5ad65..cea8296 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -475,6 +475,8 @@ definitions:
           - YAML
           - TEMPLATE
           - HADOOP_XML
+          - STATIC
+          - ARCHIVE
       dest_file:
         type: string
         description: The path that this configuration file should be created as. If it is an absolute path, it will be mounted into the DOCKER container. Absolute paths are only allowed for DOCKER containers.  If it is a relative path, only the file name should be provided, and the file will be created in the container local working directory under a folder named conf.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
index d3b18bc..623feed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
@@ -55,7 +55,8 @@ public class ConfigFile implements Serializable {
   @XmlEnum
   public enum TypeEnum {
     XML("XML"), PROPERTIES("PROPERTIES"), JSON("JSON"), YAML("YAML"), TEMPLATE(
-        "TEMPLATE"), HADOOP_XML("HADOOP_XML");
+        "TEMPLATE"), HADOOP_XML("HADOOP_XML"), STATIC("STATIC"), ARCHIVE(
+        "ARCHIVE");
 
     private String value;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java
index 7b474f6..d081606 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java
@@ -84,6 +84,7 @@ public interface YarnServiceConstants {
   String HADOOP_USER_NAME = "HADOOP_USER_NAME";
 
   String APP_CONF_DIR = "conf";
+  String APP_RESOURCES_DIR = "resources";
 
   String APP_LIB_DIR = "lib";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
index 26c332b..d16c698 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.service.provider;
 
 import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.service.api.records.Artifact;
@@ -86,8 +87,9 @@ public abstract class AbstractClientProvider {
       if (file.getType() == null) {
         throw new IllegalArgumentException("File type is empty");
       }
+      ConfigFile.TypeEnum fileType = file.getType();
 
-      if (file.getType().equals(ConfigFile.TypeEnum.TEMPLATE)) {
+      if (fileType.equals(ConfigFile.TypeEnum.TEMPLATE)) {
         if (StringUtils.isEmpty(file.getSrcFile()) &&
             !file.getProperties().containsKey(CONTENT)) {
           throw new IllegalArgumentException(MessageFormat.format("For {0} " +
@@ -96,6 +98,25 @@ public abstract class AbstractClientProvider {
                   "the 'properties' field of ConfigFile. ",
               ConfigFile.TypeEnum.TEMPLATE, CONTENT));
         }
+      } else if (fileType.equals(ConfigFile.TypeEnum.STATIC) || fileType.equals(
+          ConfigFile.TypeEnum.ARCHIVE)) {
+        if (!file.getProperties().isEmpty()) {
+          throw new IllegalArgumentException(String
+              .format("For %s format, should not specify any 'properties.'",
+                  fileType));
+        }
+
+        String srcFile = file.getSrcFile();
+        if (srcFile == null || srcFile.isEmpty()) {
+          throw new IllegalArgumentException(String.format(
+              "For %s format, should make sure that srcFile is specified",
+              fileType));
+        }
+        FileStatus fileStatus = fs.getFileStatus(new Path(srcFile));
+        if (fileStatus != null && fileStatus.isDirectory()) {
+          throw new IllegalArgumentException("srcFile=" + srcFile +
+              " is a directory, which is not supported.");
+        }
       }
       if (!StringUtils.isEmpty(file.getSrcFile())) {
         Path p = new Path(file.getSrcFile());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
index 560f421..5a17817 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
@@ -97,6 +97,10 @@ public abstract class AbstractProviderService implements ProviderService,
     ProviderUtils.createConfigFileAndAddLocalResource(launcher, fileSystem,
         compLaunchContext, tokensForSubstitution, instance, context);
 
+    // Handle static files (plain files and archive files) for localization.
+    ProviderUtils.handleStaticFilesForLocalization(launcher, fileSystem,
+        compLaunchContext);
+
     // substitute launch command
     String launchCommand = compLaunchContext.getLaunchCommand();
     // docker container may have empty commands

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
index 2fc8cfb..1ad5fd8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.service.provider;
 
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -198,6 +199,10 @@ public class ProviderUtils implements YarnServiceConstants {
 
     for (ConfigFile originalFile : compLaunchContext.getConfiguration()
         .getFiles()) {
+
+      if (isStaticFile(originalFile)) {
+        continue;
+      }
       ConfigFile configFile = originalFile.copy();
       String fileName = new Path(configFile.getDestFile()).getName();
 
@@ -207,7 +212,14 @@ public class ProviderUtils implements YarnServiceConstants {
             .replaceAll(Pattern.quote(token.getKey()), token.getValue()));
       }
 
+      /* When no source file is specified, the new configs are written to
+       * compInstanceDir/fileName.
+       * When a source file is specified, it is read, variable substitution is
+       * performed, new configs are merged in, and the result is written to
+       * compInstanceDir/fileName.
+       */
       Path remoteFile = new Path(compInstanceDir, fileName);
+
       if (!fs.getFileSystem().exists(remoteFile)) {
         log.info("Saving config file on hdfs for component " + instance
             .getCompInstanceName() + ": " + configFile);
@@ -239,22 +251,79 @@ public class ProviderUtils implements YarnServiceConstants {
       // Add resource for localization
       LocalResource configResource =
           fs.createAmResource(remoteFile, LocalResourceType.FILE);
-      File destFile = new File(configFile.getDestFile());
+      Path destFile = new Path(configFile.getDestFile());
       String symlink = APP_CONF_DIR + "/" + fileName;
-      if (destFile.isAbsolute()) {
-        launcher.addLocalResource(symlink, configResource,
-            configFile.getDestFile());
-        log.info("Add config file for localization: " + symlink + " -> "
-            + configResource.getResource().getFile() + ", dest mount path: "
-            + configFile.getDestFile());
-      } else {
-        launcher.addLocalResource(symlink, configResource);
-        log.info("Add config file for localization: " + symlink + " -> "
-            + configResource.getResource().getFile());
+      addLocalResource(launcher, symlink, configResource, destFile);
+    }
+  }
+
+  public static synchronized void handleStaticFilesForLocalization(
+      AbstractLauncher launcher, SliderFileSystem fs, ContainerLaunchService
+      .ComponentLaunchContext componentLaunchCtx)
+      throws IOException {
+    for (ConfigFile staticFile :
+        componentLaunchCtx.getConfiguration().getFiles()) {
+      // Only handle static file here.
+      if (!isStaticFile(staticFile)) {
+        continue;
+      }
+
+      if (staticFile.getSrcFile() == null) {
+        // This should not happen, AbstractClientProvider should have checked
+        // this.
+        throw new IOException("srcFile is null, please double check.");
+      }
+      Path sourceFile = new Path(staticFile.getSrcFile());
+
+      // The source file must already exist on the remote file system.
+      if (!fs.getFileSystem().exists(sourceFile)) {
+        throw new IOException(
+            "srcFile=" + sourceFile + " doesn't exist, please double check.");
       }
+
+      FileStatus fileStatus = fs.getFileSystem().getFileStatus(sourceFile);
+      if (fileStatus.isDirectory()) {
+        throw new IOException("srcFile=" + sourceFile +
+            " is a directory, which is not supported.");
+      }
+
+      // Add resource for localization
+      LocalResource localResource = fs.createAmResource(sourceFile,
+          (staticFile.getType() == ConfigFile.TypeEnum.ARCHIVE ?
+              LocalResourceType.ARCHIVE :
+              LocalResourceType.FILE));
+      Path destFile = new Path(sourceFile.getName());
+      if (staticFile.getDestFile() != null && !staticFile.getDestFile()
+          .isEmpty()) {
+        destFile = new Path(staticFile.getDestFile());
+      }
+
+      String symlink = APP_RESOURCES_DIR + "/" + destFile.getName();
+      addLocalResource(launcher, symlink, localResource, destFile);
     }
   }
 
+  private static void addLocalResource(AbstractLauncher launcher,
+      String symlink, LocalResource localResource, Path destFile) {
+    if (destFile.isAbsolute()) {
+      launcher.addLocalResource(symlink, localResource, destFile.toString());
+      log.info("Added file for localization: "+ symlink +" -> " +
+          localResource.getResource().getFile() + ", dest mount path: " +
+          destFile);
+    } else{
+      launcher.addLocalResource(symlink, localResource);
+      log.info("Added file for localization: " + symlink+ " -> " +
+          localResource.getResource().getFile());
+    }
+  }
+
+  // Static files are files that users upload before launching the service. They
+  // should be localized to the container's local disk without any changes.
+  private static boolean isStaticFile(ConfigFile file) {
+    return file.getType().equals(ConfigFile.TypeEnum.ARCHIVE) || file.getType()
+        .equals(ConfigFile.TypeEnum.STATIC);
+  }
+
   private static void resolvePropsInConfigFileAndSaveOnHdfs(SliderFileSystem fs,
       Map<String, String> tokensForSubstitution, ComponentInstance instance,
       ConfigFile configFile, String fileName, Path remoteFile)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
new file mode 100644
index 0000000..6e8bc43
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.provider;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.service.api.records.ConfigFile;
+import org.apache.hadoop.yarn.service.api.records.Configuration;
+import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
+import org.apache.hadoop.yarn.service.containerlaunch.ContainerLaunchService;
+import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test functionality of ProviderUtils.
+ */
+public class TestProviderUtils {
+  @Test
+  public void testStaticFileLocalization() throws IOException {
+    // A bunch of mocks ...
+    ContainerLaunchService.ComponentLaunchContext compLaunchCtx =
+        mock(ContainerLaunchService.ComponentLaunchContext.class);
+    AbstractLauncher launcher = mock(AbstractLauncher.class);
+    SliderFileSystem sfs = mock(SliderFileSystem.class);
+    FileSystem fs = mock(FileSystem.class);
+    when(fs.getFileStatus(any(Path.class))).thenAnswer(
+        invocationOnMock -> new FileStatus(1L, false, 1, 1L, 1L,
+            (Path) invocationOnMock.getArguments()[0]));
+    when(fs.exists(any(Path.class))).thenReturn(true);
+    when(sfs.getFileSystem()).thenReturn(fs);
+    Configuration conf = mock(Configuration.class);
+    List<ConfigFile> configFileList = new ArrayList<>();
+    when(conf.getFiles()).thenReturn(configFileList);
+    when(compLaunchCtx.getConfiguration()).thenReturn(conf);
+    when(sfs.createAmResource(any(Path.class), any(LocalResourceType.class)))
+        .thenAnswer(invocationOnMock -> new LocalResource() {
+          @Override
+          public URL getResource() {
+            return URL.fromPath(((Path) invocationOnMock.getArguments()[0]));
+          }
+
+          @Override
+          public void setResource(URL resource) {
+
+          }
+
+          @Override
+          public long getSize() {
+            return 0;
+          }
+
+          @Override
+          public void setSize(long size) {
+
+          }
+
+          @Override
+          public long getTimestamp() {
+            return 0;
+          }
+
+          @Override
+          public void setTimestamp(long timestamp) {
+
+          }
+
+          @Override
+          public LocalResourceType getType() {
+            return (LocalResourceType) invocationOnMock.getArguments()[1];
+          }
+
+          @Override
+          public void setType(LocalResourceType type) {
+
+          }
+
+          @Override
+          public LocalResourceVisibility getVisibility() {
+            return null;
+          }
+
+          @Override
+          public void setVisibility(LocalResourceVisibility visibility) {
+
+          }
+
+          @Override
+          public String getPattern() {
+            return null;
+          }
+
+          @Override
+          public void setPattern(String pattern) {
+
+          }
+
+          @Override
+          public boolean getShouldBeUploadedToSharedCache() {
+            return false;
+          }
+
+          @Override
+          public void setShouldBeUploadedToSharedCache(
+              boolean shouldBeUploadedToSharedCache) {
+
+          }
+        });
+
+    // Initialize list of files.
+    //archive
+    configFileList.add(new ConfigFile().srcFile("hdfs://default/sourceFile1")
+        .destFile("destFile1").type(ConfigFile.TypeEnum.ARCHIVE));
+
+    //static file
+    configFileList.add(new ConfigFile().srcFile("hdfs://default/sourceFile2")
+        .destFile("folder/destFile_2").type(ConfigFile.TypeEnum.STATIC));
+
+    //This will be ignored since type is JSON
+    configFileList.add(new ConfigFile().srcFile("hdfs://default/sourceFile3")
+        .destFile("destFile3").type(ConfigFile.TypeEnum.JSON));
+    //No destination file specified
+    configFileList.add(new ConfigFile().srcFile("hdfs://default/sourceFile4")
+        .type(ConfigFile.TypeEnum.STATIC));
+
+    ProviderUtils.handleStaticFilesForLocalization(launcher, sfs,
+        compLaunchCtx);
+    Mockito.verify(launcher).addLocalResource(Mockito.eq("resources/destFile1"),
+        any(LocalResource.class));
+    Mockito.verify(launcher).addLocalResource(
+        Mockito.eq("resources/destFile_2"), any(LocalResource.class));
+    Mockito.verify(launcher).addLocalResource(
+        Mockito.eq("resources/sourceFile4"), any(LocalResource.class));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java
index 79406e9..1d6be91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java
@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.yarn.service.providers;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.service.api.records.Artifact;
 import org.apache.hadoop.yarn.service.api.records.ConfigFile;
 import org.apache.hadoop.yarn.service.provider.AbstractClientProvider;
@@ -57,6 +59,7 @@ public class TestAbstractClientProvider {
   public void testConfigFiles() throws IOException {
     ClientProvider clientProvider = new ClientProvider();
     FileSystem mockFs = mock(FileSystem.class);
+    FileStatus mockFileStatus = mock(FileStatus.class);
     when(mockFs.exists(anyObject())).thenReturn(true);
 
     ConfigFile configFile = new ConfigFile();
@@ -114,5 +117,46 @@ public class TestAbstractClientProvider {
       Assert.fail(EXCEPTION_PREFIX + "duplicate dest file");
     } catch (IllegalArgumentException e) {
     }
+
+    configFiles.clear();
+    configFile = new ConfigFile();
+    configFile.setType(ConfigFile.TypeEnum.STATIC);
+    configFile.setSrcFile(null);
+    configFile.setDestFile("path/destfile3");
+    configFiles.add(configFile);
+    try {
+      clientProvider.validateConfigFiles(configFiles, mockFs);
+      Assert.fail(EXCEPTION_PREFIX + "dest file with multiple path elements");
+    } catch (IllegalArgumentException e) {
+    }
+
+    configFile.setDestFile("/path/destfile3");
+    try {
+      clientProvider.validateConfigFiles(configFiles, mockFs);
+      Assert.fail(EXCEPTION_PREFIX + "src file should be specified");
+    } catch (IllegalArgumentException e) {
+    }
+
+    //should succeed
+    configFile.setSrcFile("srcFile");
+    configFile.setDestFile("destfile3");
+    clientProvider.validateConfigFiles(configFiles, mockFs);
+
+    when(mockFileStatus.isDirectory()).thenReturn(true);
+    when(mockFs.getFileStatus(new Path("srcFile")))
+        .thenReturn(mockFileStatus).thenReturn(mockFileStatus);
+
+    configFiles.clear();
+    configFile = new ConfigFile();
+    configFile.setType(ConfigFile.TypeEnum.STATIC);
+    configFile.setSrcFile("srcFile");
+    configFile.setDestFile("destfile3");
+    configFiles.add(configFile);
+
+    try {
+      clientProvider.validateConfigFiles(configFiles, mockFs);
+      Assert.fail(EXCEPTION_PREFIX + "src file is a directory");
+    } catch (IllegalArgumentException e) {
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
index fab33c5..c648046 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
@@ -250,8 +250,8 @@ A config file that needs to be created and made available as a volume in a servi
 
 |Name|Description|Required|Schema|Default|
 |----|----|----|----|----|
-|type|Config file in the standard format like xml, properties, json, yaml, template.|false|enum (XML, PROPERTIES, JSON, YAML, TEMPLATE, HADOOP_XML)||
-|dest_file|The path that this configuration file should be created as. If it is an absolute path, it will be mounted into the DOCKER container. Absolute paths are only allowed for DOCKER containers.  If it is a relative path, only the file name should be provided, and the file will be created in the container local working directory under a folder named conf.|false|string||
+|type|Config file in a standard format like xml, properties, json, yaml, template, or static/archive resource files. When the static/archive types are specified, the file must be uploaded to the remote file system before launching the job, and the YARN service framework will localize the file prior to launching containers. Archive files are unpacked during localization.|false|enum (XML, PROPERTIES, JSON, YAML, TEMPLATE, ENV, HADOOP_XML, STATIC, ARCHIVE)||
+|dest_file|The path that this configuration file should be created as. If it is an absolute path, it will be mounted into the DOCKER container. Absolute paths are only allowed for DOCKER containers. If it is a relative path, only the file name should be provided, and the file will be created in the container local working directory under a folder named conf for all types other than static/archive. For static/archive resource types, the files are available under the resources directory.|false|string||
 |src_file|This provides the source location of the configuration file, the content of which is dumped to dest_file post property substitutions, in the format as specified in type. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported.|false|string||
 |properties|A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If src_file is specified, src_file content are dumped in the dest_file and these properties will overwrite, if any, existing properties in src_file or be added as new properties in src_file.|false|object||
 




[35/50] [abbrv] hadoop git commit: HDDS-15. Add memory profiler support to Genesis. Contributed by Anu Engineer.

Posted by xk...@apache.org.
HDDS-15. Add memory profiler support to Genesis. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b63a0af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b63a0af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b63a0af

Branch: refs/heads/HDFS-12943
Commit: 6b63a0af9b29c231166d9af50d499a246cbbb755
Parents: 3b34fca
Author: Anu Engineer <ae...@apache.org>
Authored: Wed May 2 10:44:47 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed May 2 12:54:49 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/ozone/genesis/Genesis.java    |  7 ++-
 .../ozone/genesis/GenesisMemoryProfiler.java    | 59 ++++++++++++++++++++
 2 files changed, 65 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
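
For orientation: the profiler is a JMH InternalProfiler, so it is attached to a run through the JMH options. A minimal sketch of that wiring, mirroring the Genesis.java change below; the wrapper class name and the benchmark include pattern are illustrative:

    import org.apache.hadoop.ozone.genesis.GenesisMemoryProfiler;
    import org.openjdk.jmh.runner.Runner;
    import org.openjdk.jmh.runner.RunnerException;
    import org.openjdk.jmh.runner.options.Options;
    import org.openjdk.jmh.runner.options.OptionsBuilder;

    public final class ProfiledRun {
      public static void main(String[] args) throws RunnerException {
        Options opt = new OptionsBuilder()
            .include("BenchMarkMetadataStoreReads")   // any benchmark class name pattern
            .warmupIterations(5)
            .measurementIterations(20)
            .addProfiler(GenesisMemoryProfiler.class) // reports "Max heap" per iteration
            .shouldDoGC(true)
            .forks(1)
            .build();
        new Runner(opt).run();
      }
    }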


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b63a0af/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java
index 5efa12a..0dc3db7 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java
@@ -42,9 +42,12 @@ public final class Genesis {
         .include(BenchMarkMetadataStoreReads.class.getSimpleName())
         .include(BenchMarkMetadataStoreWrites.class.getSimpleName())
         .include(BenchMarkDatanodeDispatcher.class.getSimpleName())
-        .include(BenchMarkRocksDbStore.class.getSimpleName())
+        // Commenting this benchmark out until we support a command-line or
+        // config-file based way to select which tests to run.
+//        .include(BenchMarkRocksDbStore.class.getSimpleName())
         .warmupIterations(5)
         .measurementIterations(20)
+        .addProfiler(GenesisMemoryProfiler.class)
         .shouldDoGC(true)
         .forks(1)
         .build();
@@ -52,3 +55,5 @@ public final class Genesis {
     new Runner(opt).run();
   }
 }
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b63a0af/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
new file mode 100644
index 0000000..090f1a7
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.genesis;
+
+import org.openjdk.jmh.infra.BenchmarkParams;
+import org.openjdk.jmh.infra.IterationParams;
+import org.openjdk.jmh.profile.InternalProfiler;
+import org.openjdk.jmh.results.AggregationPolicy;
+import org.openjdk.jmh.results.IterationResult;
+import org.openjdk.jmh.results.Result;
+import org.openjdk.jmh.results.ScalarResult;
+
+import java.util.ArrayList;
+import java.util.Collection;
+
+/**
+ * Max memory profiler.
+ */
+public class GenesisMemoryProfiler implements InternalProfiler {
+  @Override
+  public void beforeIteration(BenchmarkParams benchmarkParams,
+      IterationParams iterationParams) {
+
+  }
+
+  @Override
+  public Collection<? extends Result> afterIteration(BenchmarkParams
+      benchmarkParams, IterationParams iterationParams, IterationResult
+      result) {
+    long totalHeap = Runtime.getRuntime().totalMemory();
+
+    Collection<ScalarResult> samples = new ArrayList<>();
+    samples.add(new ScalarResult("Max heap", totalHeap, "bytes",
+        AggregationPolicy.MAX));
+    return samples;
+  }
+
+  @Override
+  public String getDescription() {
+    return "Genesis Memory Profiler. Computes Max Memory used by a test.";
+  }
+}
+




[09/50] [abbrv] hadoop git commit: YARN-8225. YARN precommit build failing in TestPlacementConstraintTransformations. Contributed by Shane Kumpf.

Posted by xk...@apache.org.
YARN-8225. YARN precommit build failing in TestPlacementConstraintTransformations. Contributed by Shane Kumpf.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c95eb81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c95eb81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c95eb81

Branch: refs/heads/HDFS-12943
Commit: 2c95eb8111a7b03fd4683f740123cd4720b62c3e
Parents: 4844406
Author: Weiwei Yang <ww...@apache.org>
Authored: Sat Apr 28 17:37:37 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Sat Apr 28 17:37:37 2018 +0800

----------------------------------------------------------------------
 .../yarn/api/resource/PlacementConstraintTransformations.java    | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c95eb81/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
index c5d21af..a15b20a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.api.resource;
 import java.util.ListIterator;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.records.AllocationTagNamespaceType;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CardinalityConstraint;
@@ -162,7 +163,8 @@ public class PlacementConstraintTransformations {
     public AbstractConstraint visit(CardinalityConstraint constraint) {
       return new SingleConstraint(constraint.getScope(),
           constraint.getMinCardinality(), constraint.getMaxCardinality(),
-          new TargetExpression(TargetExpression.TargetType.ALLOCATION_TAG, null,
+          new TargetExpression(TargetExpression.TargetType.ALLOCATION_TAG,
+              AllocationTagNamespaceType.SELF.toString(),
               constraint.getAllocationTags()));
     }
   }




[20/50] [abbrv] hadoop git commit: YARN-2674. Fix distributed shell AM container relaunch during RM work preserving restart. Contributed by Shane Kumpf

Posted by xk...@apache.org.
YARN-2674. Fix distributed shell AM container relaunch during RM work preserving restart. Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e1382ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e1382ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e1382ac

Branch: refs/heads/HDFS-12943
Commit: 4e1382aca4cf23ca229bdd24e0f143c22449b329
Parents: d6139c5
Author: Billie Rinaldi <bi...@apache.org>
Authored: Mon Apr 30 14:34:51 2018 -0700
Committer: Billie Rinaldi <bi...@apache.org>
Committed: Tue May 1 07:27:47 2018 -0700

----------------------------------------------------------------------
 .../distributedshell/ApplicationMaster.java     | 68 +++++++++++++-------
 .../distributedshell/TestDSAppMaster.java       |  8 +--
 2 files changed, 46 insertions(+), 30 deletions(-)
----------------------------------------------------------------------
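
The substance of the fix is in the AM's onContainersAllocated callback: once the requested number of containers has been allocated, surplus allocations (for example after an RM work-preserving restart) are released back to the RM, and each satisfied request is removed so it is not re-asked. A simplified sketch of that pattern against the AMRMClient API used in the diff below; the counters and the launchContainer call stand in for the distributed shell's own bookkeeping:

    // Inside the AMRMClientAsync callback handler's onContainersAllocated (sketch).
    for (Container allocated : allocatedContainers) {
      if (numAllocatedContainers.get() == numTotalContainers) {
        // Everything we asked for is already running; give extras back to the RM.
        amRMClient.releaseAssignedContainer(allocated.getId());
        continue;
      }
      numAllocatedContainers.incrementAndGet();
      launchContainer(allocated);  // placeholder for the DS launch-thread logic

      // Remove the matching outstanding request so the RM stops allocating for it.
      Collection<AMRMClient.ContainerRequest> requests =
          amRMClient.getMatchingRequests(allocated.getAllocationRequestId());
      if (requests.iterator().hasNext()) {
        amRMClient.removeContainerRequest(requests.iterator().next());
      }
    }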


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e1382ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 75f4073..cca5676 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -31,6 +31,7 @@ import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -105,6 +106,7 @@ import org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
@@ -1060,32 +1062,48 @@ public class ApplicationMaster {
     public void onContainersAllocated(List<Container> allocatedContainers) {
       LOG.info("Got response from RM for container ask, allocatedCnt="
           + allocatedContainers.size());
-      numAllocatedContainers.addAndGet(allocatedContainers.size());
       for (Container allocatedContainer : allocatedContainers) {
-        String yarnShellId = Integer.toString(yarnShellIdCounter);
-        yarnShellIdCounter++;
-        LOG.info("Launching shell command on a new container."
-            + ", containerId=" + allocatedContainer.getId()
-            + ", yarnShellId=" + yarnShellId
-            + ", containerNode=" + allocatedContainer.getNodeId().getHost()
-            + ":" + allocatedContainer.getNodeId().getPort()
-            + ", containerNodeURI=" + allocatedContainer.getNodeHttpAddress()
-            + ", containerResourceMemory"
-            + allocatedContainer.getResource().getMemorySize()
-            + ", containerResourceVirtualCores"
-            + allocatedContainer.getResource().getVirtualCores());
-        // + ", containerToken"
-        // +allocatedContainer.getContainerToken().getIdentifier().toString());
-
-        Thread launchThread = createLaunchContainerThread(allocatedContainer,
-            yarnShellId);
-
-        // launch and start the container on a separate thread to keep
-        // the main thread unblocked
-        // as all containers may not be allocated at one go.
-        launchThreads.add(launchThread);
-        launchedContainers.add(allocatedContainer.getId());
-        launchThread.start();
+        if (numAllocatedContainers.get() == numTotalContainers) {
+          LOG.info("The requested number of containers have been allocated."
+              + " Releasing the extra container allocation from the RM.");
+          amRMClient.releaseAssignedContainer(allocatedContainer.getId());
+        } else {
+          numAllocatedContainers.addAndGet(1);
+          String yarnShellId = Integer.toString(yarnShellIdCounter);
+          yarnShellIdCounter++;
+          LOG.info(
+              "Launching shell command on a new container."
+                  + ", containerId=" + allocatedContainer.getId()
+                  + ", yarnShellId=" + yarnShellId
+                  + ", containerNode="
+                  + allocatedContainer.getNodeId().getHost()
+                  + ":" + allocatedContainer.getNodeId().getPort()
+                  + ", containerNodeURI="
+                  + allocatedContainer.getNodeHttpAddress()
+                  + ", containerResourceMemory"
+                  + allocatedContainer.getResource().getMemorySize()
+                  + ", containerResourceVirtualCores"
+                  + allocatedContainer.getResource().getVirtualCores());
+
+          Thread launchThread =
+              createLaunchContainerThread(allocatedContainer, yarnShellId);
+
+          // launch and start the container on a separate thread to keep
+          // the main thread unblocked
+          // as all containers may not be allocated at one go.
+          launchThreads.add(launchThread);
+          launchedContainers.add(allocatedContainer.getId());
+          launchThread.start();
+
+          // Remove the corresponding request
+          Collection<AMRMClient.ContainerRequest> requests =
+              amRMClient.getMatchingRequests(
+                  allocatedContainer.getAllocationRequestId());
+          if (requests.iterator().hasNext()) {
+            AMRMClient.ContainerRequest request = requests.iterator().next();
+            amRMClient.removeContainerRequest(request);
+          }
+        }
       }
     }
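
In short, the reworked callback above caps launches at numTotalContainers: any
surplus allocation is handed straight back to the RM, and each allocation that
is launched also removes one matching outstanding request so the RM stops
over-granting. A condensed sketch of that flow (only the AMRMClient calls that
appear in the diff are used; the surrounding fields come from ApplicationMaster
and the sketch is illustrative, not the committed code):

  public void onContainersAllocated(List<Container> allocated) {
    for (Container container : allocated) {
      if (numAllocatedContainers.get() == numTotalContainers) {
        // Everything we asked for is already accounted for; return the surplus.
        amRMClient.releaseAssignedContainer(container.getId());
        continue;
      }
      numAllocatedContainers.addAndGet(1);
      String yarnShellId = Integer.toString(yarnShellIdCounter++);
      Thread launchThread = createLaunchContainerThread(container, yarnShellId);
      launchThreads.add(launchThread);
      launchedContainers.add(container.getId());
      launchThread.start();

      // Remove the ask this allocation satisfied so the RM does not keep
      // granting containers for it.
      Collection<AMRMClient.ContainerRequest> requests =
          amRMClient.getMatchingRequests(container.getAllocationRequestId());
      if (requests.iterator().hasNext()) {
        amRMClient.removeContainerRequest(requests.iterator().next());
      }
    }
  }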
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e1382ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java
index f11bdf8..f2a8041 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java
@@ -106,7 +106,6 @@ public class TestDSAppMaster {
     handler.onContainersAllocated(containers);
     Assert.assertEquals("Wrong container allocation count", 1,
         master.getAllocatedContainers());
-    Mockito.verifyZeroInteractions(mockClient);
     Assert.assertEquals("Incorrect number of threads launched", 1,
         master.threadsLaunched);
     Assert.assertEquals("Incorrect YARN Shell IDs",
@@ -121,15 +120,14 @@ public class TestDSAppMaster {
     ContainerId id4 = BuilderUtils.newContainerId(1, 1, 1, 4);
     containers.add(generateContainer(id4));
     handler.onContainersAllocated(containers);
-    Assert.assertEquals("Wrong final container allocation count", 4,
+    Assert.assertEquals("Wrong final container allocation count", 2,
         master.getAllocatedContainers());
 
-    Assert.assertEquals("Incorrect number of threads launched", 4,
+    Assert.assertEquals("Incorrect number of threads launched", 2,
         master.threadsLaunched);
 
     Assert.assertEquals("Incorrect YARN Shell IDs",
-        Arrays.asList("1", "2", "3", "4"), master.yarnShellIds);
-
+        Arrays.asList("1", "2"), master.yarnShellIds);
     // make sure we handle completion events correctly
     List<ContainerStatus> status = new ArrayList<>();
     status.add(generateContainerStatus(id1, ContainerExitStatus.SUCCESS));




[44/50] [abbrv] hadoop git commit: YARN-7818. Remove privileged operation warnings during container launch for the ContainerRuntimes. Contributed by Shane Kumpf

Posted by xk...@apache.org.
YARN-7818. Remove privileged operation warnings during container launch for the ContainerRuntimes. Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/502914ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/502914ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/502914ca

Branch: refs/heads/HDFS-12943
Commit: 502914ca32ac02b19116fd681eb8301b92fccbb3
Parents: a3b416f
Author: Billie Rinaldi <bi...@apache.org>
Authored: Fri May 4 08:53:55 2018 -0700
Committer: Billie Rinaldi <bi...@apache.org>
Committed: Fri May 4 08:53:55 2018 -0700

----------------------------------------------------------------------
 .../linux/runtime/DefaultLinuxContainerRuntime.java   |  5 +++--
 .../linux/runtime/DockerLinuxContainerRuntime.java    | 14 +++++++-------
 2 files changed, 10 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/502914ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
index d8db6ad..b5c933a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
@@ -108,6 +108,9 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
       launchOp.appendArgs(tcCommandFile);
     }
 
+    // Some failures here are acceptable. Let the calling executor decide.
+    launchOp.disableFailureLogging();
+
     //List<String> -> stored as List -> fetched/converted to List<String>
     //we can't do better here thanks to type-erasure
     @SuppressWarnings("unchecked")
@@ -118,8 +121,6 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
       privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
             launchOp, null, null, false, false);
     } catch (PrivilegedOperationException e) {
-      LOG.warn("Launch container failed. Exception: ", e);
-
       throw new ContainerExecutionException("Launch container failed", e
           .getExitCode(), e.getOutput(), e.getErrorOutput());
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/502914ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index ec1d055..0bacd03 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -914,13 +914,13 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     PrivilegedOperation launchOp = buildLaunchOp(ctx,
         commandFile, runCommand);
 
+    // Some failures here are acceptable. Let the calling executor decide.
+    launchOp.disableFailureLogging();
+
     try {
       privilegedOperationExecutor.executePrivilegedOperation(null,
           launchOp, null, null, false, false);
     } catch (PrivilegedOperationException e) {
-      LOG.warn("Launch container failed. Exception: ", e);
-      LOG.info("Docker command used: " + runCommand);
-
       throw new ContainerExecutionException("Launch container failed", e
           .getExitCode(), e.getOutput(), e.getErrorOutput());
     }
@@ -943,14 +943,14 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       PrivilegedOperation launchOp = buildLaunchOp(ctx, commandFile,
           startCommand);
 
+      // Some failures here are acceptable. Let the calling executor decide.
+      launchOp.disableFailureLogging();
+
       try {
         privilegedOperationExecutor.executePrivilegedOperation(null,
             launchOp, null, null, false, false);
       } catch (PrivilegedOperationException e) {
-        LOG.warn("Relaunch container failed. Exception: ", e);
-        LOG.info("Docker command used: " + startCommand);
-
-        throw new ContainerExecutionException("Launch container failed", e
+        throw new ContainerExecutionException("Relaunch container failed", e
             .getExitCode(), e.getOutput(), e.getErrorOutput());
       }
     } else {
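
Both runtimes end up with the same shape: mark the launch operation so the
privileged-operation layer does not log the failure itself, then let the
exception carry the exit code and output up to the calling executor. A minimal
sketch of that pattern (the PrivilegedOperation, PrivilegedOperationExecutor
and ContainerExecutionException calls are the ones shown in the diff; the
surrounding method is illustrative only):

  PrivilegedOperation launchOp = buildLaunchOp(ctx, commandFile, runCommand);

  // Some failures here are acceptable; suppress failure logging and let the
  // calling executor decide what to report.
  launchOp.disableFailureLogging();

  try {
    privilegedOperationExecutor.executePrivilegedOperation(null,
        launchOp, null, null, false, false);
  } catch (PrivilegedOperationException e) {
    // The exit code and command output travel with the exception instead of
    // being logged at this layer.
    throw new ContainerExecutionException("Launch container failed",
        e.getExitCode(), e.getOutput(), e.getErrorOutput());
  }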




[42/50] [abbrv] hadoop git commit: YARN-8226. Improved anti-affinity description in YARN Service doc. Contributed by Gour Saha

Posted by xk...@apache.org.
YARN-8226. Improved anti-affinity description in YARN Service doc.
           Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76987372
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76987372
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76987372

Branch: refs/heads/HDFS-12943
Commit: 7698737207b01e80b1be2b4df60363f952a1c2b4
Parents: 7fe3214
Author: Eric Yang <ey...@apache.org>
Authored: Thu May 3 13:35:40 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Thu May 3 13:35:40 2018 -0400

----------------------------------------------------------------------
 .../main/resources/definition/YARN-Services-Examples.md   | 10 +++++++---
 .../src/site/markdown/yarn-service/YarnServiceAPI.md      | 10 +++++++---
 2 files changed, 14 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76987372/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
index a4ef2d2..83e558c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
@@ -351,9 +351,13 @@ POST URL - http://localhost:8088/app/v1/services
 ##### GET Response JSON
 GET URL - http://localhost:8088/app/v1/services/hello-world
 
-Note, that the 3 containers will come up on 3 different nodes. If there are less
-than 3 NMs running in the cluster, then all 3 container requests will not be
-fulfilled and the service will be in non-STABLE state.
+Note that for an anti-affinity component, no more than 1 container will be
+allocated on a specific node. In this example, 3 containers have been requested
+by component "hello". All 3 containers were allocated because the cluster had 3
+or more NMs. If the cluster had fewer than 3 NMs, then fewer than 3 containers
+would be allocated. Whenever the number of allocated containers is less than
+the number of requested containers, the component and the service will be in a
+non-STABLE state.
 
 ```json
 {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76987372/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
index 496c1a1..fab33c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
@@ -766,9 +766,13 @@ POST URL - http://localhost:8088/app/v1/services
 ##### GET Response JSON
 GET URL - http://localhost:8088/app/v1/services/hello-world
 
-Note, that the 3 containers will come up on 3 different nodes. If there are less
-than 3 NMs running in the cluster, then all 3 container requests will not be
-fulfilled and the service will be in non-STABLE state.
+Note that for an anti-affinity component, no more than 1 container will be
+allocated on a specific node. In this example, 3 containers have been requested
+by component "hello". All 3 containers were allocated because the cluster had 3
+or more NMs. If the cluster had fewer than 3 NMs, then fewer than 3 containers
+would be allocated. Whenever the number of allocated containers is less than
+the number of requested containers, the component and the service will be in a
+non-STABLE state.
 
 ```json
 {




[07/50] [abbrv] hadoop git commit: HDFS-13434. RBF: Fix dead links in RBF document. Contributed by Chetna Chaudhari.

Posted by xk...@apache.org.
HDFS-13434. RBF: Fix dead links in RBF document. Contributed by Chetna Chaudhari.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f469628b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f469628b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f469628b

Branch: refs/heads/HDFS-12943
Commit: f469628bba350ba79bc6a0d38f9dc1cb5eb65c77
Parents: 92c5331
Author: Inigo Goiri <in...@apache.org>
Authored: Fri Apr 27 15:13:47 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri Apr 27 15:13:47 2018 -0700

----------------------------------------------------------------------
 .../src/site/markdown/HDFSRouterFederation.md       | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f469628b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
index 43e89ed..70c6226 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
@@ -21,7 +21,7 @@ Introduction
 ------------
 
 NameNodes have scalability limits because of the metadata overhead comprised of inodes (files and directories) and file blocks, the number of Datanode heartbeats, and the number of HDFS RPC client requests.
-The common solution is to split the filesystem into smaller subclusters [HDFS Federation](./Federation.html) and provide a federated view [ViewFs](./ViewFs.html).
+The common solution is to split the filesystem into smaller subclusters [HDFS Federation](../hadoop-hdfs/Federation.html) and provide a federated view [ViewFs](../hadoop-hdfs/ViewFs.html).
 The problem is how to maintain the split of the subclusters (e.g., namespace partition), which forces users to connect to multiple subclusters and manage the allocation of folders/files to them.
 
 
@@ -37,8 +37,8 @@ This layer must be scalable, highly available, and fault tolerant.
 
 This federation layer comprises multiple components.
 The _Router_ component has the same interface as a NameNode and forwards the client requests to the correct subcluster, based on ground-truth information from a State Store.
-The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](./ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters.
-This approach has the same architecture as [YARN federation](../hadoop-yarn/Federation.html).
+The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](../hadoop-hdfs/ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters.
+This approach has the same architecture as [YARN federation](../../hadoop-yarn/hadoop-yarn-site/Federation.html).
 
 ![Router-based Federation Sequence Diagram | width=800](./images/routerfederation.png)
 
@@ -140,7 +140,7 @@ Examples users may encounter include the following.
 ### Quota management
 Federation supports and controls global quota at mount table level.
 For performance reasons, the Router caches the quota usage and updates it periodically. These quota usage values
-will be used for quota-verification during each WRITE RPC call invoked in RouterRPCSever. See [HDFS Quotas Guide](./HdfsQuotaAdminGuide.html)
+will be used for quota verification during each WRITE RPC call invoked in RouterRPCServer. See [HDFS Quotas Guide](../hadoop-hdfs/HdfsQuotaAdminGuide.html)
 for the quota detail.
 
 ### State Store
@@ -163,7 +163,7 @@ The Routers discard the entries older than a certain threshold (e.g., ten Router
 
 * **Mount Table**:
 This table hosts the mapping between folders and subclusters.
-It is similar to the mount table in [ViewFs](.ViewFs.html) where it specifies the federated folder, the destination subcluster and the path in that folder.
+It is similar to the mount table in [ViewFs](../hadoop-hdfs/ViewFs.html) where it specifies the federated folder, the destination subcluster and the path in that folder.
 
 
 ### Security
@@ -175,7 +175,7 @@ Deployment
 
 By default, the Router is ready to take requests and monitor the NameNode in the local machine.
 It needs to know the State Store endpoint by setting `dfs.federation.router.store.driver.class`.
-The rest of the options are documented in [hdfs-default.xml](./hdfs-default.xml).
+The rest of the options are documented in [hdfs-default.xml](../hadoop-hdfs/hdfs-default.xml).
 
 Once the Router is configured, it can be started:
 
@@ -187,7 +187,7 @@ And to stop it:
 
 ### Mount table management
 
-The mount table entries are pretty much the same as in [ViewFs](./ViewFs.html).
+The mount table entries are pretty much the same as in [ViewFs](../hadoop-hdfs/ViewFs.html).
 A good practice for simplifying the management is to name the federated namespace with the same names as the destination namespaces.
 For example, if we want to mount `/data/app1` in the federated namespace, it is recommended to have that same name as in the destination namespace.
 
@@ -290,7 +290,7 @@ Router configuration
 --------------------
 
 One can add the configurations for Router-based federation to **hdfs-site.xml**.
-The main options are documented in [hdfs-default.xml](./hdfs-default.xml).
+The main options are documented in [hdfs-default.xml](../hadoop-hdfs/hdfs-default.xml).
 The configuration values are described in this section.
 
 ### RPC server
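
As a quick illustration of the deployment note in the doc above, the only
mandatory piece of Router wiring called out here is the State Store driver
setting. A minimal, hypothetical Java sketch (the class name and the driver
argument are placeholders; the available driver implementations are listed in
hdfs-default.xml, not defined here):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.HdfsConfiguration;

  public class RouterConfExample {
    public static Configuration routerConf(String stateStoreDriverClass) {
      // Point the Router at its State Store; the driver class name is supplied
      // by the caller and should be one documented in hdfs-default.xml.
      Configuration conf = new HdfsConfiguration();
      conf.set("dfs.federation.router.store.driver.class", stateStoreDriverClass);
      return conf;
    }
  }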




[37/50] [abbrv] hadoop git commit: HDFS-13481. TestRollingFileSystemSinkWithHdfs#testFlushThread: test failed intermittently (Contributed by Gabor Bota via Daniel Templeton)

Posted by xk...@apache.org.
HDFS-13481. TestRollingFileSystemSinkWithHdfs#testFlushThread: test failed intermittently
(Contributed by Gabor Bota via Daniel Templeton)

Change-Id: I9921981dfa69669fe7912dd2a31ae8b638283204


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87c23ef6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87c23ef6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87c23ef6

Branch: refs/heads/HDFS-12943
Commit: 87c23ef643393c39e8353ca9f495b0c8f97cdbd9
Parents: f4d280f
Author: Daniel Templeton <te...@apache.org>
Authored: Wed May 2 16:54:42 2018 -0700
Committer: Daniel Templeton <te...@apache.org>
Committed: Wed May 2 17:13:40 2018 -0700

----------------------------------------------------------------------
 .../hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java       | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87c23ef6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
index da85b9b..0f90d82 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
@@ -182,7 +182,8 @@ public class RollingFileSystemSinkTestBase {
         .add(prefix + ".sink.mysink0.ignore-error", ignoreErrors)
         .add(prefix + ".sink.mysink0.allow-append", allowAppend)
         .add(prefix + ".sink.mysink0.roll-offset-interval-millis", 0)
-        .add(prefix + ".sink.mysink0.roll-interval", "1h");
+        .add(prefix + ".sink.mysink0.roll-interval", "1h")
+        .add("*.queue.capacity", 2);
 
     if (useSecureParams) {
       builder.add(prefix + ".sink.mysink0.keytab-key", SINK_KEYTAB_FILE_KEY)




[06/50] [abbrv] hadoop git commit: YARN-8005. Add unit tests for queue priority with dominant resource calculator. (Zian Chen via wangda)

Posted by xk...@apache.org.
YARN-8005. Add unit tests for queue priority with dominant resource calculator. (Zian Chen via wangda)

Change-Id: I17a645f20869a1e5d86fa7a325c93fec908b91dc


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92c53314
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92c53314
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92c53314

Branch: refs/heads/HDFS-12943
Commit: 92c5331423afee172e843198f4824469eb92e89b
Parents: b1833d9
Author: Wangda Tan <wa...@apache.org>
Authored: Fri Apr 27 13:08:43 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Fri Apr 27 13:08:43 2018 -0700

----------------------------------------------------------------------
 .../TestPreemptionForQueueWithPriorities.java   | 147 +++++++++++++++++++
 1 file changed, 147 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92c53314/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
index 2b54d77..e9a8116 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
@@ -18,10 +18,16 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
 
 import static org.mockito.Matchers.argThat;
 import static org.mockito.Mockito.never;
@@ -30,8 +36,28 @@ import static org.mockito.Mockito.verify;
 
 public class TestPreemptionForQueueWithPriorities
     extends ProportionalCapacityPreemptionPolicyMockFramework {
+  // Initialize resource map
+  private Map<String, ResourceInformation> riMap = new HashMap<>();
+
   @Before
   public void setup() {
+
+    // Initialize mandatory resources
+    ResourceInformation memory = ResourceInformation.newInstance(
+        ResourceInformation.MEMORY_MB.getName(),
+        ResourceInformation.MEMORY_MB.getUnits(),
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
+    ResourceInformation vcores = ResourceInformation.newInstance(
+        ResourceInformation.VCORES.getName(),
+        ResourceInformation.VCORES.getUnits(),
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
+    riMap.put(ResourceInformation.MEMORY_URI, memory);
+    riMap.put(ResourceInformation.VCORES_URI, vcores);
+
+    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
+
     super.setup();
     policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
   }
@@ -358,4 +384,125 @@ public class TestPreemptionForQueueWithPriorities
             getAppAttemptId(4))));
   }
 
+  @Test
+  public void testPriorityPreemptionWithMandatoryResourceForHierarchicalOfQueues()
+      throws Exception {
+    /**
+     * Queue structure is:
+     *
+     * <pre>
+     *           root
+     *           /  \
+     *          a    b
+     *        /  \  /  \
+     *       a1  a2 b1  b2
+     * </pre>
+     *
+     * a2 is underserved and needs more resources. b2 will be preemptable.
+     */
+
+    String labelsConfig = "=100:200,true"; // default partition
+    String nodesConfig = "n1="; // only one node
+    String queuesConfig =
+        // guaranteed,max,used,pending
+        "root(=[100:200 100:200 100:200 100:200]);" + //root
+            "-a(=[50:100 100:200 20:40 60:100]){priority=1};" + // a
+            "--a1(=[10:20 100:200 10:30 30:20]){priority=1};" + // a1
+            "--a2(=[40:80 100:200 10:10 30:80]){priority=1};" + // a2
+            "-b(=[50:100 100:200 80:160 40:100]){priority=1};" + // b
+            "--b1(=[20:40 100:200 20:40 20:70]){priority=2};" + // b1
+            "--b2(=[30:60 100:200 60:120 20:30]){priority=1}";// b2
+
+    String appsConfig =
+        //queueName\t(priority,resource,host,expression,#repeat,reserved)
+        "a1\t(1,1:4,n1,,10,false);" + // app1 in a1
+            "a2\t(1,1:1,n1,,10,false);" + // app2 in a2
+            "b1\t(1,3:4,n1,,10,false);" + // app3 in b1
+            "b2\t(1,20:40,n1,,3,false)";  // app4 in b2
+
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig, true);
+    policy.editSchedule();
+
+    // Preemption should first divide capacities between a / b, and b1 should
+    // get less preemption than b2 (because b1 has higher priority)
+    verify(mDisp, never()).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+    verify(mDisp, never()).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(2))));
+    verify(mDisp, never()).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(3))));
+    verify(mDisp, times(2)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(4))));
+  }
+
+  @Test
+  public void testPriorityPreemptionWithMultipleResource()
+      throws Exception {
+    String RESOURCE_1 = "res1";
+
+    riMap.put(RESOURCE_1, ResourceInformation.newInstance(RESOURCE_1, "", 0,
+        ResourceTypes.COUNTABLE, 0, Integer.MAX_VALUE));
+
+    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
+
+    /**
+     * Queue structure is:
+     *
+     * <pre>
+     *           root
+     *           /  \
+     *          a    b
+     *        /  \
+     *       a1  a2
+     * </pre>
+     *
+     * a1 and a2 are using most of the resources.
+     * b needs more resources and is underserved.
+     */
+    String labelsConfig =
+        "=100:100:10,true;";
+    String nodesConfig =
+        "n1=;"; // n1 is default partition
+    String queuesConfig =
+        // guaranteed,max,used,pending
+        "root(=[100:100:10 100:100:10 100:100:10 100:100:10]);" + //root
+            "-a(=[50:60:3 100:100:10 80:90:10 30:20:4]){priority=1};" + // a
+            "--a1(=[20:15:3 100:50:10 60:50:10 0]){priority=1};" + // a1
+            "--a2(=[30:45 100:50:10 20:40 30:20:4]){priority=2};" + // a2
+            "-b(=[50:40:7 100:100:10 20:10 30:10:2]){priority=1}"; // b
+
+    String appsConfig =
+        //queueName\t(priority,resource,host,expression,#repeat,reserved)
+        "a1\t" // app1 in a1
+            + "(1,6:5:1,n1,,10,false);" +
+            "a2\t" // app2 in a2
+            + "(1,2:4,n1,,10,false);" +
+            "b\t" // app3 in b
+            + "(1,2:1,n1,,10,false)";
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig, true);
+    policy.editSchedule();
+
+    // Preemption should first divide capacities between a / b, and a2 should
+    // get less preemption than a1 (because a2 has higher priority). More
+    // specifically, a2 will not get preempted since the resource preempted
+    // from a1 can satisfy b already.
+    verify(mDisp, times(7)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+
+    verify(mDisp, never()).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(2))));
+
+    verify(mDisp, never()).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(3))));
+  }
+
 }




[11/50] [abbrv] hadoop git commit: HDDS-11. Fix findbugs exclude rules for ozone and hdds projects. Contributed by Elek, Marton.

Posted by xk...@apache.org.
HDDS-11. Fix findbugs exclude rules for ozone and hdds projects. Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d43474f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d43474f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d43474f

Branch: refs/heads/HDFS-12943
Commit: 3d43474f7567117e4e11a0d198be6aa1fc023106
Parents: eb7fe1d
Author: Anu Engineer <ae...@apache.org>
Authored: Mon Apr 30 09:20:58 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Mon Apr 30 09:20:58 2018 -0700

----------------------------------------------------------------------
 .../dev-support/findbugsExcludeFile.xml         | 21 ++++++++++++++++++++
 hadoop-hdds/container-service/pom.xml           |  7 +++++++
 .../tools/dev-support/findbugsExcludeFile.xml   | 19 ++++++++++++++++++
 hadoop-ozone/tools/pom.xml                      | 14 +++++++++++++
 4 files changed, 61 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d43474f/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
new file mode 100644
index 0000000..3571a89
--- /dev/null
+++ b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
@@ -0,0 +1,21 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<FindBugsFilter>
+  <Match>
+    <Package name="org.apache.hadoop.hdds.protocol.proto"/>
+  </Match>
+</FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d43474f/hadoop-hdds/container-service/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml
index 3dc8470..36c7235 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -98,6 +98,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 </project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d43474f/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml
new file mode 100644
index 0000000..e6a345e
--- /dev/null
+++ b/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<FindBugsFilter>
+     <Match>
+       <Package name="org.apache.hadoop.ozone.genesis.generated" />
+     </Match>
+ </FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d43474f/hadoop-ozone/tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index 918a675..839ca0d 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -68,4 +68,18 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <version>1.19</version>
     </dependency>
   </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml
+          </excludeFilterFile>
+          <fork>true</fork>
+          <maxHeap>2048</maxHeap>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
 </project>




[10/50] [abbrv] hadoop git commit: HDFS-13509. Bug fix for breakHardlinks() of ReplicaInfo/LocalReplica, and fix TestFileAppend failures on Windows. Contributed by Xiao Liang.

Posted by xk...@apache.org.
HDFS-13509. Bug fix for breakHardlinks() of ReplicaInfo/LocalReplica, and fix TestFileAppend failures on Windows. Contributed by Xiao Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb7fe1d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb7fe1d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb7fe1d5

Branch: refs/heads/HDFS-12943
Commit: eb7fe1d588de903be2ff6e20384c25c184881532
Parents: 2c95eb8
Author: Inigo Goiri <in...@apache.org>
Authored: Sat Apr 28 09:05:30 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Sat Apr 28 09:05:30 2018 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/LocalReplica.java      | 18 ++---
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 71 +++++++++++++-------
 2 files changed, 55 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb7fe1d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
index 2c5af11..68126a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
@@ -186,16 +186,18 @@ abstract public class LocalReplica extends ReplicaInfo {
     final FileIoProvider fileIoProvider = getFileIoProvider();
     final File tmpFile = DatanodeUtil.createFileWithExistsCheck(
         getVolume(), b, DatanodeUtil.getUnlinkTmpFile(file), fileIoProvider);
-    try (FileInputStream in = fileIoProvider.getFileInputStream(
-        getVolume(), file)) {
-      try (FileOutputStream out = fileIoProvider.getFileOutputStream(
-          getVolume(), tmpFile)) {
-        IOUtils.copyBytes(in, out, 16 * 1024);
+    try {
+      try (FileInputStream in = fileIoProvider.getFileInputStream(
+          getVolume(), file)) {
+        try (FileOutputStream out = fileIoProvider.getFileOutputStream(
+            getVolume(), tmpFile)) {
+          IOUtils.copyBytes(in, out, 16 * 1024);
+        }
       }
       if (file.length() != tmpFile.length()) {
-        throw new IOException("Copy of file " + file + " size " + file.length()+
-                              " into file " + tmpFile +
-                              " resulted in a size of " + tmpFile.length());
+        throw new IOException("Copy of file " + file + " size " + file.length()
+            + " into file " + tmpFile + " resulted in a size of "
+            + tmpFile.length());
       }
       fileIoProvider.replaceFile(getVolume(), tmpFile, file);
     } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb7fe1d5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 20cec6a..aa8afb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
@@ -120,7 +121,9 @@ public class TestFileAppend{
   @Test
   public void testBreakHardlinksIfNeeded() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     FileSystem fs = cluster.getFileSystem();
     InetSocketAddress addr = new InetSocketAddress("localhost",
                                                    cluster.getNameNodePort());
@@ -186,7 +189,9 @@ public class TestFileAppend{
   public void testSimpleFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     DistributedFileSystem fs = cluster.getFileSystem();
     try {
 
@@ -239,7 +244,9 @@ public class TestFileAppend{
   public void testComplexFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     DistributedFileSystem fs = cluster.getFileSystem();
     try {
 
@@ -286,7 +293,9 @@ public class TestFileAppend{
   @Test(expected = FileNotFoundException.class)
   public void testFileNotFound() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     FileSystem fs = cluster.getFileSystem();
     try {
       Path file1 = new Path("/nonexistingfile.dat");
@@ -301,7 +310,9 @@ public class TestFileAppend{
   @Test
   public void testAppendTwice() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     final FileSystem fs1 = cluster.getFileSystem();
     final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
     try {
@@ -340,7 +351,9 @@ public class TestFileAppend{
   @Test
   public void testAppend2Twice() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     final DistributedFileSystem fs1 = cluster.getFileSystem();
     final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
     try {
@@ -386,8 +399,9 @@ public class TestFileAppend{
         HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY,
         false);
 
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf,
+        builderBaseDir).numDataNodes(4).build();
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
       final Path p = new Path("/testMultipleAppend/foo");
@@ -438,8 +452,9 @@ public class TestFileAppend{
     final long softLimit = 1L;
     final long hardLimit = 9999999L;
 
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     cluster.setLeasePeriod(softLimit, hardLimit);
     cluster.waitActive();
 
@@ -478,8 +493,9 @@ public class TestFileAppend{
     final long softLimit = 1L;
     final long hardLimit = 9999999L;
 
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     cluster.setLeasePeriod(softLimit, hardLimit);
     cluster.waitActive();
 
@@ -525,8 +541,9 @@ public class TestFileAppend{
     Configuration conf = new HdfsConfiguration();
     conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
         "false");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(3).build();
     DistributedFileSystem fs = null;
     try {
       fs = cluster.getFileSystem();
@@ -578,8 +595,9 @@ public class TestFileAppend{
     Configuration conf = new HdfsConfiguration();
     conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
         "false");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(3).build();
     DistributedFileSystem fs = null;
     final String hello = "hello\n";
     try {
@@ -650,8 +668,9 @@ public class TestFileAppend{
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
     conf.setInt("dfs.min.replication", 1);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     try {
       DistributedFileSystem fs = cluster.getFileSystem();
       Path fileName = new Path("/appendCorruptBlock");
@@ -676,7 +695,9 @@ public class TestFileAppend{
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
     conf.setInt("dfs.min.replication", 1);
 
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
@@ -693,9 +714,9 @@ public class TestFileAppend{
       // Call FsDatasetImpl#append to append the block file,
       // which converts it to a rbw replica.
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
-      long newGS = block.getGenerationStamp()+1;
-      ReplicaHandler
-          replicaHandler = dataSet.append(block, newGS, initialFileLength);
+      long newGS = block.getGenerationStamp() + 1;
+      ReplicaHandler replicaHandler =
+          dataSet.append(block, newGS, initialFileLength);
 
       // write data to block file
       ReplicaBeingWritten rbw =
@@ -711,9 +732,8 @@ public class TestFileAppend{
 
       // update checksum file
       final int smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
-      FsDatasetUtil.computeChecksum(
-          rbw.getMetaFile(), rbw.getMetaFile(), rbw.getBlockFile(),
-          smallBufferSize, conf);
+      FsDatasetUtil.computeChecksum(rbw.getMetaFile(), rbw.getMetaFile(),
+          rbw.getBlockFile(), smallBufferSize, conf);
 
       // read the block
       // the DataNode BlockSender should read from the rbw replica's in-memory
@@ -725,5 +745,4 @@ public class TestFileAppend{
       cluster.shutdown();
     }
   }
-
 }
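
The tests touched here all follow the same setup pattern: instead of letting
MiniDFSCluster pick its default storage location, each test passes a randomized
base directory so every run gets its own working directory (the commit targets
the TestFileAppend failures seen on Windows). A minimal sketch of that pattern
(the builder overload and helper are the ones used in the diff; the test body
is illustrative):

  File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
      .numDataNodes(1)
      .build();
  try {
    cluster.waitActive();
    // ... exercise the cluster ...
  } finally {
    cluster.shutdown();
  }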




[22/50] [abbrv] hadoop git commit: YARN-7799. Improved YARN service jar file handling. Contributed by Billie Rinaldi

Posted by xk...@apache.org.
YARN-7799. Improved YARN service jar file handling.
           Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24eeea8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24eeea8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24eeea8b

Branch: refs/heads/HDFS-12943
Commit: 24eeea8b18749e02ea05b03eb18e3cf0455077c5
Parents: 9e2cfb2
Author: Eric Yang <ey...@apache.org>
Authored: Tue May 1 16:46:34 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Tue May 1 16:46:34 2018 -0400

----------------------------------------------------------------------
 .../hadoop-yarn-services-core/pom.xml           |   5 +
 .../yarn/service/client/ServiceClient.java      |  95 +++++++++++----
 .../yarn/service/utils/CoreFileSystem.java      |   7 ++
 .../yarn/service/client/TestServiceCLI.java     | 118 +++++++++++++++++--
 .../markdown/yarn-service/Configurations.md     |   2 +-
 5 files changed, 195 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24eeea8b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
index 3ce8876..7efe8bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
@@ -173,6 +173,11 @@
     </dependency>
 
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+    </dependency>
+
+    <dependency>
       <groupId>com.google.protobuf</groupId>
       <artifactId>protobuf-java</artifactId>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24eeea8b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 8dd5342..67306d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -28,7 +28,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.registry.client.api.RegistryConstants;
@@ -37,8 +39,8 @@ import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;
 import org.apache.hadoop.registry.client.binding.RegistryUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest;
@@ -896,13 +898,13 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
 
   protected Path addJarResource(String serviceName,
       Map<String, LocalResource> localResources)
-      throws IOException, SliderException {
+      throws IOException, YarnException {
     Path libPath = fs.buildClusterDirPath(serviceName);
     ProviderUtils
         .addProviderJar(localResources, ServiceMaster.class, SERVICE_CORE_JAR, fs,
             libPath, "lib", false);
     Path dependencyLibTarGzip = fs.getDependencyTarGzip();
-    if (fs.isFile(dependencyLibTarGzip)) {
+    if (actionDependency(null, false) == EXIT_SUCCESS) {
       LOG.info("Loading lib tar from " + dependencyLibTarGzip);
       fs.submitTarGzipAndUpdate(localResources);
     } else {
@@ -1223,18 +1225,18 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
     return actionDependency(destinationFolder, true);
   }
 
-  public int actionDependency(String destinationFolder, boolean overwrite)
-      throws IOException, YarnException {
+  public int actionDependency(String destinationFolder, boolean overwrite) {
     String currentUser = RegistryUtils.currentUser();
     LOG.info("Running command as user {}", currentUser);
 
+    Path dependencyLibTarGzip;
     if (destinationFolder == null) {
-      destinationFolder = String.format(YarnServiceConstants.DEPENDENCY_DIR,
-          VersionInfo.getVersion());
+      dependencyLibTarGzip = fs.getDependencyTarGzip();
+    } else {
+      dependencyLibTarGzip = new Path(destinationFolder,
+          YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_NAME
+              + YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_EXT);
     }
-    Path dependencyLibTarGzip = new Path(destinationFolder,
-        YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_NAME
-            + YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_EXT);
 
     // Check if dependency has already been uploaded, in which case log
     // appropriately and exit success (unless overwrite has been requested)
@@ -1247,24 +1249,71 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
 
     String[] libDirs = ServiceUtils.getLibDirs();
     if (libDirs.length > 0) {
-      File tempLibTarGzipFile = File.createTempFile(
-          YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_NAME + "_",
-          YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_EXT);
-      // copy all jars
-      tarGzipFolder(libDirs, tempLibTarGzipFile, createJarFilter());
-
-      LOG.info("Version Info: " + VersionInfo.getBuildVersion());
-      fs.copyLocalFileToHdfs(tempLibTarGzipFile, dependencyLibTarGzip,
-          new FsPermission(YarnServiceConstants.DEPENDENCY_DIR_PERMISSIONS));
-      LOG.info("To let apps use this tarball, in yarn-site set config property "
-          + "{} to {}", YarnServiceConf.DEPENDENCY_TARBALL_PATH,
-          dependencyLibTarGzip);
-      return EXIT_SUCCESS;
+      File tempLibTarGzipFile = null;
+      try {
+        if (!checkPermissions(dependencyLibTarGzip)) {
+          return EXIT_UNAUTHORIZED;
+        }
+
+        tempLibTarGzipFile = File.createTempFile(
+            YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_NAME + "_",
+            YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_EXT);
+        // copy all jars
+        tarGzipFolder(libDirs, tempLibTarGzipFile, createJarFilter());
+
+        fs.copyLocalFileToHdfs(tempLibTarGzipFile, dependencyLibTarGzip,
+            new FsPermission(YarnServiceConstants.DEPENDENCY_DIR_PERMISSIONS));
+        LOG.info("To let apps use this tarball, in yarn-site set config " +
+                "property {} to {}", YarnServiceConf.DEPENDENCY_TARBALL_PATH,
+            dependencyLibTarGzip);
+        return EXIT_SUCCESS;
+      } catch (IOException e) {
+        LOG.error("Got exception creating tarball and uploading to HDFS", e);
+        return EXIT_EXCEPTION_THROWN;
+      } finally {
+        if (tempLibTarGzipFile != null) {
+          if (!tempLibTarGzipFile.delete()) {
+            LOG.warn("Failed to delete tmp file {}", tempLibTarGzipFile);
+          }
+        }
+      }
     } else {
       return EXIT_FALSE;
     }
   }
 
+  private boolean checkPermissions(Path dependencyLibTarGzip) throws
+      IOException {
+    AccessControlList yarnAdminAcl = new AccessControlList(getConfig().get(
+        YarnConfiguration.YARN_ADMIN_ACL,
+        YarnConfiguration.DEFAULT_YARN_ADMIN_ACL));
+    AccessControlList dfsAdminAcl = new AccessControlList(
+        getConfig().get(DFSConfigKeys.DFS_ADMIN, " "));
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    if (!yarnAdminAcl.isUserAllowed(ugi) && !dfsAdminAcl.isUserAllowed(ugi)) {
+      LOG.error("User must be on the {} or {} list to have permission to " +
+          "upload AM dependency tarball", YarnConfiguration.YARN_ADMIN_ACL,
+          DFSConfigKeys.DFS_ADMIN);
+      return false;
+    }
+
+    Path parent = dependencyLibTarGzip.getParent();
+    while (parent != null) {
+      if (fs.getFileSystem().exists(parent)) {
+        FsPermission perm = fs.getFileSystem().getFileStatus(parent)
+            .getPermission();
+        if (!perm.getOtherAction().implies(FsAction.READ_EXECUTE)) {
+          LOG.error("Parent directory {} of {} tarball location {} does not " +
+              "have world read/execute permission", parent, YarnServiceConf
+              .DEPENDENCY_TARBALL_PATH, dependencyLibTarGzip);
+          return false;
+        }
+      }
+      parent = parent.getParent();
+    }
+    return true;
+  }
+
   protected ClientAMProtocol createAMProxy(String serviceName,
       ApplicationReport appReport) throws IOException, YarnException {
 

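Aside for readers of the diff above: the new checkPermissions() only lets the dependency upload proceed when the caller is on the YARN or HDFS admin ACL and every existing ancestor of the tarball location grants world read/execute. Below is a minimal, self-contained sketch of that ancestor walk; the class name, sample path and version string are illustrative assumptions, not part of the commit.

```java
// Hedged sketch (not the committed method): walk every existing ancestor of a
// candidate tarball path and check that "other" users have READ_EXECUTE,
// mirroring the checkPermissions() logic added in the diff above.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class TarballPermissionCheck {

  public static boolean ancestorsWorldReadable(FileSystem fs, Path tarball)
      throws IOException {
    Path parent = tarball.getParent();
    while (parent != null) {
      if (fs.exists(parent)) {
        FsPermission perm = fs.getFileStatus(parent).getPermission();
        if (!perm.getOtherAction().implies(FsAction.READ_EXECUTE)) {
          // A locked-down ancestor would hide the tarball from other users' AMs.
          return false;
        }
      }
      parent = parent.getParent();
    }
    return true;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical location; the real default comes from
    // CoreFileSystem#getDependencyTarGzip (see the next file in this commit).
    Path tarball = new Path("/yarn-services/3.2.0/service-dep.tar.gz");
    System.out.println("ancestors world readable: "
        + ancestorsWorldReadable(fs, tarball));
  }
}
```
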
http://git-wip-us.apache.org/repos/asf/hadoop/blob/24eeea8b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java
index 5c2bac6..50b22e5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
@@ -363,6 +364,12 @@ public class CoreFileSystem {
     if (configuredDependencyTarballPath != null) {
       dependencyLibTarGzip = new Path(configuredDependencyTarballPath);
     }
+    if (dependencyLibTarGzip == null) {
+      dependencyLibTarGzip = new Path(String.format(YarnServiceConstants
+          .DEPENDENCY_DIR, VersionInfo.getVersion()),
+          YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_NAME
+              + YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_EXT);
+    }
     return dependencyLibTarGzip;
   }
 

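Aside: the CoreFileSystem change above makes getDependencyTarGzip() fall back to a version-derived default when yarn.service.framework.path is not configured. A hedged sketch of that fallback follows, with the directory format and file name hard-coded as assumptions (they mirror the /yarn-services/${hadoop.version}/service-dep.tar.gz default documented later in this commit).

```java
// Hedged sketch of the fallback path construction; constants are inlined here
// purely for illustration and are not taken verbatim from YarnServiceConstants.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.VersionInfo;

public class DependencyTarballDefault {

  public static Path defaultTarball(String configuredPath) {
    if (configuredPath != null) {
      return new Path(configuredPath);
    }
    // Derive the default location from the running Hadoop version.
    return new Path(String.format("/yarn-services/%s", VersionInfo.getVersion()),
        "service-dep.tar.gz");
  }

  public static void main(String[] args) {
    // e.g. /yarn-services/3.2.0-SNAPSHOT/service-dep.tar.gz on a 3.2.0-SNAPSHOT build
    System.out.println(defaultTarball(null));
  }
}
```
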
http://git-wip-us.apache.org/repos/asf/hadoop/blob/24eeea8b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java
index 7290962..c40a39d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java
@@ -20,6 +20,10 @@ package org.apache.hadoop.yarn.service.client;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.yarn.client.cli.ApplicationCLI;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -27,12 +31,15 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.conf.ExampleAppJson;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
 import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
 import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -40,21 +47,33 @@ import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
+import java.util.Arrays;
 import java.util.List;
 
 import static org.apache.hadoop.yarn.client.api.AppAdminClient.YARN_APP_ADMIN_CLIENT_PREFIX;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEPENDENCY_TARBALL_PATH;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_SERVICE_BASE_PATH;
+import static org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes.EXIT_SUCCESS;
+import static org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes.EXIT_UNAUTHORIZED;
 import static org.mockito.Mockito.spy;
 
 public class TestServiceCLI {
   private static final Logger LOG = LoggerFactory.getLogger(TestServiceCLI
       .class);
 
+  @Rule
+  public TemporaryFolder tmpFolder = new TemporaryFolder();
+
   private Configuration conf = new YarnConfiguration();
-  private File basedir;
   private SliderFileSystem fs;
-  private String basedirProp;
   private ApplicationCLI cli;
+  private File basedir;
+  private String basedirProp;
+  private File dependencyTarGzBaseDir;
+  private Path dependencyTarGz;
+  private String dependencyTarGzProp;
+  private String yarnAdminNoneAclProp;
+  private String dfsAdminAclProp;
 
   private void createCLI() {
     cli = new ApplicationCLI();
@@ -67,12 +86,17 @@ public class TestServiceCLI {
     cli.setConf(conf);
   }
 
+  private int runCLI(String[] args) throws Exception {
+    LOG.info("running CLI: yarn {}", Arrays.asList(args));
+    return ToolRunner.run(cli, ApplicationCLI.preProcessArgs(args));
+  }
+
   private void buildApp(String serviceName, String appDef) throws Throwable {
     String[] args = {"app",
         "-D", basedirProp, "-save", serviceName,
         ExampleAppJson.resourceName(appDef),
         "-appTypes", DUMMY_APP_TYPE};
-    ToolRunner.run(cli, ApplicationCLI.preProcessArgs(args));
+    Assert.assertEquals(EXIT_SUCCESS, runCLI(args));
   }
 
   private void buildApp(String serviceName, String appDef,
@@ -83,7 +107,13 @@ public class TestServiceCLI {
         "-appTypes", DUMMY_APP_TYPE,
         "-updateLifetime", lifetime,
         "-changeQueue", queue};
-    ToolRunner.run(cli, ApplicationCLI.preProcessArgs(args));
+    Assert.assertEquals(EXIT_SUCCESS, runCLI(args));
+  }
+
+  private static Path getDependencyTarGz(File dir) {
+    return new Path(new File(dir, YarnServiceConstants
+        .DEPENDENCY_TAR_GZ_FILE_NAME + YarnServiceConstants
+        .DEPENDENCY_TAR_GZ_FILE_EXT).getAbsolutePath());
   }
 
   @Before
@@ -91,12 +121,22 @@ public class TestServiceCLI {
     basedir = new File("target", "apps");
     basedirProp = YARN_SERVICE_BASE_PATH + "=" + basedir.getAbsolutePath();
     conf.set(YARN_SERVICE_BASE_PATH, basedir.getAbsolutePath());
+    dependencyTarGzBaseDir = tmpFolder.getRoot();
+    dependencyTarGz = getDependencyTarGz(dependencyTarGzBaseDir);
+    dependencyTarGzProp = DEPENDENCY_TARBALL_PATH + "=" + dependencyTarGz
+        .toString();
+    conf.set(DEPENDENCY_TARBALL_PATH, dependencyTarGz.toString());
     fs = new SliderFileSystem(conf);
     if (basedir.exists()) {
       FileUtils.deleteDirectory(basedir);
     } else {
       basedir.mkdirs();
     }
+    yarnAdminNoneAclProp = YarnConfiguration.YARN_ADMIN_ACL + "=none";
+    dfsAdminAclProp = DFSConfigKeys.DFS_ADMIN + "=" +
+        UserGroupInformation.getCurrentUser();
+    System.setProperty(YarnServiceConstants.PROPERTY_LIB_DIR, basedir
+        .getAbsolutePath());
     createCLI();
   }
 
@@ -108,7 +148,7 @@ public class TestServiceCLI {
     cli.stop();
   }
 
-  @Test
+  @Test (timeout = 180000)
   public void testFlexComponents() throws Throwable {
     // currently can only test building apps, since that is the only
     // operation that doesn't require an RM
@@ -122,7 +162,7 @@ public class TestServiceCLI {
     checkApp(serviceName, "master", 1L, 1000L, "qname");
   }
 
-  @Test
+  @Test (timeout = 180000)
   public void testInitiateServiceUpgrade() throws Exception {
     String[] args = {"app", "-upgrade", "app-1",
         "-initiate", ExampleAppJson.resourceName(ExampleAppJson.APP_JSON),
@@ -131,7 +171,7 @@ public class TestServiceCLI {
     Assert.assertEquals(result, 0);
   }
 
-  @Test
+  @Test (timeout = 180000)
   public void testInitiateAutoFinalizeServiceUpgrade() throws Exception {
     String[] args =  {"app", "-upgrade", "app-1",
         "-initiate", ExampleAppJson.resourceName(ExampleAppJson.APP_JSON),
@@ -141,7 +181,7 @@ public class TestServiceCLI {
     Assert.assertEquals(result, 0);
   }
 
-  @Test
+  @Test (timeout = 180000)
   public void testUpgradeInstances() throws Exception {
     conf.set(YARN_APP_ADMIN_CLIENT_PREFIX + DUMMY_APP_TYPE,
         DummyServiceClient.class.getName());
@@ -153,6 +193,68 @@ public class TestServiceCLI {
     Assert.assertEquals(result, 0);
   }
 
+  @Test (timeout = 180000)
+  public void testEnableFastLaunch() throws Exception {
+    fs.getFileSystem().create(new Path(basedir.getAbsolutePath(), "test.jar"))
+        .close();
+
+    Path defaultPath = new Path(dependencyTarGz.toString());
+    Assert.assertFalse("Dependency tarball should not exist before the test",
+        fs.isFile(defaultPath));
+    String[] args = {"app", "-D", dependencyTarGzProp, "-enableFastLaunch",
+        "-appTypes", DUMMY_APP_TYPE};
+    Assert.assertEquals(EXIT_SUCCESS, runCLI(args));
+    Assert.assertTrue("Dependency tarball did not exist after the test",
+        fs.isFile(defaultPath));
+
+    File secondBaseDir = new File(dependencyTarGzBaseDir, "2");
+    Path secondTarGz = getDependencyTarGz(secondBaseDir);
+    Assert.assertFalse("Dependency tarball should not exist before the test",
+        fs.isFile(secondTarGz));
+    String[] args2 = {"app", "-D", yarnAdminNoneAclProp, "-D",
+        dfsAdminAclProp, "-D", dependencyTarGzProp, "-enableFastLaunch",
+        secondBaseDir.getAbsolutePath(), "-appTypes", DUMMY_APP_TYPE};
+    Assert.assertEquals(EXIT_SUCCESS, runCLI(args2));
+    Assert.assertTrue("Dependency tarball did not exist after the test",
+        fs.isFile(secondTarGz));
+  }
+
+  @Test (timeout = 180000)
+  public void testEnableFastLaunchUserPermissions() throws Exception {
+    String[] args = {"app", "-D", yarnAdminNoneAclProp, "-D",
+        dependencyTarGzProp, "-enableFastLaunch", "-appTypes", DUMMY_APP_TYPE};
+    Assert.assertEquals(EXIT_UNAUTHORIZED, runCLI(args));
+  }
+
+  @Test (timeout = 180000)
+  public void testEnableFastLaunchFilePermissions() throws Exception {
+    File badDir = new File(dependencyTarGzBaseDir, "bad");
+    badDir.mkdir();
+    fs.getFileSystem().setPermission(new Path(badDir.getAbsolutePath()),
+        new FsPermission("751"));
+
+    String[] args = {"app", "-D", dependencyTarGzProp, "-enableFastLaunch",
+        badDir.getAbsolutePath(), "-appTypes", DUMMY_APP_TYPE};
+    Assert.assertEquals(EXIT_UNAUTHORIZED, runCLI(args));
+
+    badDir = new File(badDir, "child");
+    badDir.mkdir();
+    fs.getFileSystem().setPermission(new Path(badDir.getAbsolutePath()),
+        new FsPermission("755"));
+
+    String[] args2 = {"app", "-D", dependencyTarGzProp, "-enableFastLaunch",
+        badDir.getAbsolutePath(), "-appTypes", DUMMY_APP_TYPE};
+    Assert.assertEquals(EXIT_UNAUTHORIZED, runCLI(args2));
+
+    badDir = new File(dependencyTarGzBaseDir, "badx");
+    badDir.mkdir();
+    fs.getFileSystem().setPermission(new Path(badDir.getAbsolutePath()),
+        new FsPermission("754"));
+
+    String[] args3 = {"app", "-D", dependencyTarGzProp, "-enableFastLaunch",
+        badDir.getAbsolutePath(), "-appTypes", DUMMY_APP_TYPE};
+    Assert.assertEquals(EXIT_UNAUTHORIZED, runCLI(args3));
+  }
 
   private void checkApp(String serviceName, String compName, long count, Long
       lifetime, String queue) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24eeea8b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md
index 75186dc..524cfb9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md
@@ -100,7 +100,7 @@ System-wide service AM properties can only be configured in the cluster `yarn-si
 
 | System-Level Config Name | Description |
 | ------------ | ------------- |
-|yarn.service.framework.path | HDFS parent directory where the service AM dependency tarball can be found.|
+|yarn.service.framework.path | HDFS path of the service AM dependency tarball. When no file exists at this location, AM dependencies will be uploaded by the RM the first time a service is started or launched. If the RM user does not have permission to upload the file to this location or the location is not world readable, the AM dependency jars will be uploaded each time a service is started or launched. If unspecified, value will be assumed to be /yarn-services/${hadoop.version}/service-dep.tar.gz.|
 |yarn.service.base.path | HDFS parent directory where service artifacts will be stored (default ${user_home_dir}/.yarn/).
 |yarn.service.client-am.retry.max-wait-ms | Max retry time in milliseconds for the service client to talk to the service AM (default 900000, i.e. 15 minutes).|
 |yarn.service.client-am.retry-interval-ms | Retry interval in milliseconds for the service client to talk to the service AM (default 2000, i.e. 2 seconds).|

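Aside: per the updated description above, pointing yarn.service.framework.path at a pre-uploaded tarball lets the RM avoid re-uploading AM dependencies on every launch. A hedged, illustrative way to set the property programmatically is shown below; the HDFS path is an assumption for the example, not a value from the commit.

```java
// Hedged example: setting the dependency tarball path on a client-side
// configuration. In practice the value is wherever the admin uploaded the
// tarball (for instance via "yarn app -enableFastLaunch").
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class FrameworkPathExample {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    conf.set("yarn.service.framework.path",
        "hdfs:///yarn-services/3.2.0/service-dep.tar.gz");
    System.out.println(conf.get("yarn.service.framework.path"));
  }
}
```
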



[48/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-12943

Posted by xk...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a38fde5d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
----------------------------------------------------------------------
diff --cc hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
index ebeff94,0497931..507517b
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
@@@ -408,34 -404,11 +408,34 @@@ public class WritableRpcEngine implemen
          boolean verbose, SecretManager<? extends TokenIdentifier> secretManager,
          String portRangeConfig) 
          throws IOException {
 +      this(null, protocolImpl,  conf,  bindAddress,   port,
 +          numHandlers,  numReaders,  queueSizePerHandler,  verbose,
 +          secretManager, null, null);
 +    }
 +
 +    /**
 +     * Construct an RPC server.
 +     * @param protocolClass - the protocol being registered
 +     *     can be null for compatibility with old usage (see below for details)
 +     * @param protocolImpl the protocol impl that will be called
 +     * @param conf the configuration to use
 +     * @param bindAddress the address to bind on to listen for connection
 +     * @param port the port to listen for connections on
 +     * @param numHandlers the number of method handler threads to run
 +     * @param verbose whether each call should be logged
 +     * @param alignmentContext provides server state info on client responses
 +     */
 +    public Server(Class<?> protocolClass, Object protocolImpl,
 +        Configuration conf, String bindAddress,  int port,
 +        int numHandlers, int numReaders, int queueSizePerHandler,
 +        boolean verbose, SecretManager<? extends TokenIdentifier> secretManager,
 +        String portRangeConfig, AlignmentContext alignmentContext)
 +        throws IOException {
        super(bindAddress, port, null, numHandlers, numReaders,
            queueSizePerHandler, conf,
-           classNameBase(protocolImpl.getClass().getName()), secretManager,
+           serverNameFromClass(protocolImpl.getClass()), secretManager,
            portRangeConfig);
 -
 +      setAlignmentContext(alignmentContext);
        this.verbose = verbose;
        
        

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a38fde5d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a38fde5d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a38fde5d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------




[02/50] [abbrv] hadoop git commit: YARN-7781. Update YARN service documentation. Contributed by Gour Saha

Posted by xk...@apache.org.
YARN-7781. Update YARN service documentation.
           Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24a5ccbf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24a5ccbf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24a5ccbf

Branch: refs/heads/HDFS-12943
Commit: 24a5ccbf4bda413a98480d52c204d56f82ef9ac5
Parents: 14b4799
Author: Eric Yang <ey...@apache.org>
Authored: Fri Apr 27 12:38:30 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Fri Apr 27 12:38:30 2018 -0400

----------------------------------------------------------------------
 .../definition/YARN-Services-Examples.md        | 236 +++++++++++++++++--
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |  21 +-
 .../yarn/service/api/records/BaseResource.java  |   2 +-
 .../site/markdown/yarn-service/QuickStart.md    |  13 +-
 .../markdown/yarn-service/YarnServiceAPI.md     |  61 +++--
 5 files changed, 279 insertions(+), 54 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24a5ccbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
index 22f941e..a4ef2d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
@@ -15,7 +15,7 @@
 ## Examples
 
 ### Create a simple single-component service with most attribute values as defaults
-POST URL - http://localhost:9191/ws/v1/services
+POST URL - http://localhost:8088/app/v1/services
 
 ##### POST Request JSON
 ```json
@@ -27,7 +27,7 @@ POST URL - http://localhost:9191/ws/v1/services
     [
       {
         "name": "hello",
-        "number_of_containers": 1,
+        "number_of_containers": 2,
         "artifact": {
           "id": "nginx:latest",
           "type": "DOCKER"
@@ -36,14 +36,14 @@ POST URL - http://localhost:9191/ws/v1/services
         "resource": {
           "cpus": 1,
           "memory": "256"
-       }
+        }
       }
     ]
 }
 ```
 
 ##### GET Response JSON
-GET URL - http://localhost:9191/ws/v1/services/hello-world
+GET URL - http://localhost:8088/app/v1/services/hello-world
 
 Note, lifetime value of -1 means unlimited lifetime.
 
@@ -54,10 +54,11 @@ Note, lifetime value of -1 means unlimited lifetime.
     "description": "hello world example",
     "id": "application_1503963985568_0002",
     "lifetime": -1,
+    "state": "STABLE",
     "components": [
         {
             "name": "hello",
-            "dependencies": [],
+            "state": "STABLE",
             "resource": {
                 "cpus": 1,
                 "memory": "256"
@@ -70,21 +71,21 @@ Note, lifetime value of -1 means unlimited lifetime.
             "quicklinks": [],
             "containers": [
                 {
-                    "id": "container_e03_1503963985568_0002_01_000001",
+                    "id": "container_e03_1503963985568_0002_01_000002",
                     "ip": "10.22.8.143",
-                    "hostname": "myhost.local",
+                    "hostname": "ctr-e03-1503963985568-0002-01-000002.example.site",
                     "state": "READY",
                     "launch_time": 1504051512412,
-                    "bare_host": "10.22.8.143",
+                    "bare_host": "host100.cloud.com",
                     "component_instance_name": "hello-0"
                 },
                 {
-                    "id": "container_e03_1503963985568_0002_01_000002",
-                    "ip": "10.22.8.143",
-                    "hostname": "myhost.local",
+                    "id": "container_e03_1503963985568_0002_01_000003",
+                    "ip": "10.22.8.144",
+                    "hostname": "ctr-e03-1503963985568-0002-01-000003.example.site",
                     "state": "READY",
                     "launch_time": 1504051536450,
-                    "bare_host": "10.22.8.143",
+                    "bare_host": "host100.cloud.com",
                     "component_instance_name": "hello-1"
                 }
             ],
@@ -103,7 +104,7 @@ Note, lifetime value of -1 means unlimited lifetime.
 
 ```
 ### Update to modify the lifetime of a service
-PUT URL - http://localhost:9191/ws/v1/services/hello-world
+PUT URL - http://localhost:8088/app/v1/services/hello-world
 
 ##### PUT Request JSON
 
@@ -115,43 +116,59 @@ Note, irrespective of what the current lifetime value is, this update request wi
 }
 ```
 ### Stop a service
-PUT URL - http://localhost:9191/ws/v1/services/hello-world
+PUT URL - http://localhost:8088/app/v1/services/hello-world
 
 ##### PUT Request JSON
 ```json
 {
-    "state": "STOPPED"
+  "state": "STOPPED"
 }
 ```
 
 ### Start a service
-PUT URL - http://localhost:9191/ws/v1/services/hello-world
+PUT URL - http://localhost:8088/app/v1/services/hello-world
 
 ##### PUT Request JSON
 ```json
 {
-    "state": "STARTED"
+  "state": "STARTED"
 }
 ```
 
-### Update to flex up/down the no of containers (instances) of a component of a service
-PUT URL - http://localhost:9191/ws/v1/services/hello-world/components/hello
+### Update to flex up/down the number of containers (instances) of a component of a service
+PUT URL - http://localhost:8088/app/v1/services/hello-world/components/hello
 
 ##### PUT Request JSON
 ```json
 {
-    "name": "hello",
-    "number_of_containers": 3
+  "number_of_containers": 3
+}
+```
+
+Alternatively, you can specify the entire "components" section instead.
+
+PUT URL - http://localhost:8088/app/v1/services/hello-world
+##### PUT Request JSON
+```json
+{
+  "state": "FLEX",
+  "components" :
+    [
+      {
+        "name": "hello",
+        "number_of_containers": 3
+      }
+    ]
 }
 ```
 
 ### Destroy a service
-DELETE URL - http://localhost:9191/ws/v1/services/hello-world
+DELETE URL - http://localhost:8088/app/v1/services/hello-world
 
 ***
 
 ### Create a complicated service  - HBase
-POST URL - http://localhost:9191:/ws/v1/services/hbase-app-1
+POST URL - http://localhost:8088:/app/v1/services/hbase-app-1
 
 ##### POST Request JSON
 
@@ -249,3 +266,176 @@ POST URL - http://localhost:9191:/ws/v1/services/hbase-app-1
   }
 }
 ```
+
+### Create a service requesting GPUs in addition to CPUs and RAM
+POST URL - http://localhost:8088/app/v1/services
+
+##### POST Request JSON
+```json
+{
+  "name": "hello-world",
+  "version": "1.0.0",
+  "description": "hello world example with GPUs",
+  "components" :
+    [
+      {
+        "name": "hello",
+        "number_of_containers": 2,
+        "artifact": {
+          "id": "nginx:latest",
+          "type": "DOCKER"
+        },
+        "launch_command": "./start_nginx.sh",
+        "resource": {
+          "cpus": 1,
+          "memory": "256",
+          "additional" : {
+            "yarn.io/gpu" : {
+              "value" : 4,
+              "unit" : ""
+            }
+          }
+        }
+      }
+    ]
+}
+```
+
+### Create a service with a component requesting anti-affinity placement policy
+POST URL - http://localhost:8088/app/v1/services
+
+##### POST Request JSON
+```json
+{
+  "name": "hello-world",
+  "version": "1.0.0",
+  "description": "hello world example with anti-affinity",
+  "components" :
+    [
+      {
+        "name": "hello",
+        "number_of_containers": 3,
+        "artifact": {
+          "id": "nginx:latest",
+          "type": "DOCKER"
+        },
+        "launch_command": "./start_nginx.sh",
+        "resource": {
+          "cpus": 1,
+          "memory": "256"
+        },
+        "placement_policy": {
+          "constraints": [
+            {
+              "type": "ANTI_AFFINITY",
+              "scope": "NODE",
+              "node_attributes": {
+                "os": ["linux", "windows"],
+                "fault_domain": ["fd1", "fd2"]
+              },
+              "node_partitions": [
+                "gpu",
+                "fast-disk"
+              ],
+              "target_tags": [
+                "hello"
+              ]
+            }
+          ]
+        }
+      }
+    ]
+}
+```
+
+##### GET Response JSON
+GET URL - http://localhost:8088/app/v1/services/hello-world
+
+Note that the 3 containers will come up on 3 different nodes. If there are fewer
+than 3 NMs running in the cluster, not all 3 container requests will be
+fulfilled and the service will be in a non-STABLE state.
+
+```json
+{
+    "name": "hello-world",
+    "version": "1.0.0",
+    "description": "hello world example with anti-affinity",
+    "id": "application_1503963985568_0003",
+    "lifetime": -1,
+    "state": "STABLE",
+    "components": [
+        {
+            "name": "hello",
+            "state": "STABLE",
+            "resource": {
+                "cpus": 1,
+                "memory": "256"
+            },
+            "placement_policy": {
+              "constraints": [
+                {
+                  "type": "ANTI_AFFINITY",
+                  "scope": "NODE",
+                  "node_attributes": {
+                    "os": ["linux", "windows"],
+                    "fault_domain": ["fd1", "fd2"]
+                  },
+                  "node_partitions": [
+                    "gpu",
+                    "fast-disk"
+                  ],
+                  "target_tags": [
+                    "hello"
+                  ]
+                }
+              ]
+            },
+            "configuration": {
+                "properties": {},
+                "env": {},
+                "files": []
+            },
+            "quicklinks": [],
+            "containers": [
+                {
+                    "id": "container_e03_1503963985568_0003_01_000002",
+                    "ip": "10.22.8.143",
+                    "hostname": "ctr-e03-1503963985568-0003-01-000002.example.site",
+                    "state": "READY",
+                    "launch_time": 1504051512412,
+                    "bare_host": "host100.cloud.com",
+                    "component_instance_name": "hello-0"
+                },
+                {
+                    "id": "container_e03_1503963985568_0003_01_000003",
+                    "ip": "10.22.8.144",
+                    "hostname": "ctr-e03-1503963985568-0003-01-000003.example.site",
+                    "state": "READY",
+                    "launch_time": 1504051536450,
+                    "bare_host": "host101.cloud.com",
+                    "component_instance_name": "hello-1"
+                },
+                {
+                    "id": "container_e03_1503963985568_0003_01_000004",
+                    "ip": "10.22.8.145",
+                    "hostname": "ctr-e03-1503963985568-0003-01-000004.example.site",
+                    "state": "READY",
+                    "launch_time": 1504051536450,
+                    "bare_host": "host102.cloud.com",
+                    "component_instance_name": "hello-2"
+                }
+            ],
+            "launch_command": "./start_nginx.sh",
+            "number_of_containers": 1,
+            "run_privileged_container": false
+        }
+    ],
+    "configuration": {
+        "properties": {},
+        "env": {},
+        "files": []
+    },
+    "quicklinks": {}
+}
+```
+

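Aside: the examples above give endpoint URLs and JSON payloads but no client invocation. A hedged sketch of submitting the first "hello-world" spec with plain JDK HTTP follows; the host, port and inline JSON are taken from the examples above, and a secured cluster would additionally need SPNEGO or delegation-token handling that is omitted here.

```java
// Hedged sketch: POST a minimal service spec to the YARN services REST API.
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class SubmitServiceExample {
  public static void main(String[] args) throws Exception {
    // Spec mirrors the single-component example above.
    String spec = "{\"name\":\"hello-world\",\"version\":\"1.0.0\","
        + "\"components\":[{\"name\":\"hello\",\"number_of_containers\":2,"
        + "\"artifact\":{\"id\":\"nginx:latest\",\"type\":\"DOCKER\"},"
        + "\"launch_command\":\"./start_nginx.sh\","
        + "\"resource\":{\"cpus\":1,\"memory\":\"256\"}}]}";

    URL url = new URL("http://localhost:8088/app/v1/services");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("POST");
    conn.setRequestProperty("Content-Type", "application/json");
    conn.setDoOutput(true);
    try (OutputStream out = conn.getOutputStream()) {
      out.write(spec.getBytes(StandardCharsets.UTF_8));
    }
    System.out.println("HTTP " + conn.getResponseCode());
    conn.disconnect();
  }
}
```
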
http://git-wip-us.apache.org/repos/asf/hadoop/blob/24a5ccbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index a604af7..8c5ad65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -36,8 +36,8 @@ info:
     name: Apache 2.0
     url: http://www.apache.org/licenses/LICENSE-2.0.html
 # the domain of the service
-host: host.mycompany.com
-port: 9191(default)
+host: localhost
+port: 8088(default)
 # array of all schemes that your API supports
 schemes:
   - http
@@ -249,7 +249,7 @@ definitions:
         type: string
         description: The YARN queue that this service should be submitted to.
       kerberos_principal:
-        description: The Kerberos Principal of the service
+        description: The principal info of the user who launches the service.
         $ref: '#/definitions/KerberosPrincipal'
       docker_client_config:
         type: string
@@ -283,7 +283,7 @@ definitions:
         type: object
         additionalProperties:
           $ref: '#/definitions/ResourceInformation'
-        description: Map of resource name to ResourceInformation
+        description: A map of resource type name to resource type information. Including value (integer), and unit (string). This will be used to specify resource other than cpu and memory. Please refer to example below.
   PlacementPolicy:
     description: Advanced placement policy of the components of a service.
     required:
@@ -535,6 +535,7 @@ definitions:
           - STOPPED
           - FAILED
           - FLEX
+          - UPGRADING
   ContainerState:
     description: The current state of the container of a service.
     properties:
@@ -552,8 +553,10 @@ definitions:
         type: string
         description: enum of the state of the component
         enum:
+          - INIT
           - FLEXING
           - STABLE
+          - UPGRADING
   ServiceStatus:
     description: The current status of a submitted service, returned as a response to the GET API.
     properties:
@@ -572,10 +575,10 @@ definitions:
     properties:
       principal_name:
         type: string
-        description: The principal name of the user who launches the service.
+        description: The principal name of the user who launches the service. Note that `_HOST` is required in the `principal_name` field such as `testuser/_HOST@EXAMPLE.COM` because Hadoop client validates that the server's (in this case, the AM's) principal has hostname present when communicating to the server.
       keytab:
         type: string
-        description: |
-          The URI of the kerberos keytab. It supports two modes:
-          URI starts with "hdfs://": A path on hdfs where the keytab is stored. The keytab will be localized by YARN to each host.
-          URI starts with "file://": A path on the local host where the keytab is stored. It is assumed that the keytabs are pre-installed by admins before AM launches.
+        description: The URI of the kerberos keytab. Currently supports only files present on the bare host. URI starts with "file\://" - A path on the local host where the keytab is stored. It is assumed that admin pre-installs the keytabs on the local host before AM launches.
+
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24a5ccbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/BaseResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/BaseResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/BaseResource.java
index 7ac86d4..3d61fb7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/BaseResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/BaseResource.java
@@ -30,7 +30,7 @@ public class BaseResource implements Serializable {
 
   /**
    * Resource location for a service, e.g.
-   * /ws/v1/services/helloworld
+   * /app/v1/services/helloworld
    *
    **/
   public String getUri() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24a5ccbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md
index e91380c..e144320 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md
@@ -207,7 +207,7 @@ If you are building from source code, make sure you use `-Pyarn-ui` in the `mvn`
 ```
 
 # Run with security
-YARN service framework supports running in a secure(kerberized) environment. User needs to specify the kerberos principal name and keytab when they launch the service.
+YARN service framework supports running in a secure (kerberized) environment. User needs to specify the kerberos principal name and keytab when they launch the service.
 E.g. A typical configuration looks like below:
 ```
 {
@@ -215,15 +215,16 @@ E.g. A typical configuration looks like below:
   ...
   ...
   "kerberos_principal" : {
-    "principal_name" : "hdfs-demo@EXAMPLE.COM",
-    "keytab" : "hdfs:///etc/security/keytabs/hdfs.headless.keytab"
+    "principal_name" : "hdfs-demo/_HOST@EXAMPLE.COM",
+    "keytab" : "file:///etc/security/keytabs/hdfs.headless.keytab"
   }
 }
 ```
+Note that `_HOST` is required in the `principal_name` field because Hadoop client validates that the server's (in this case, the AM's) principal has hostname present when communicating to the server.
 * principal_name : the principal name of the user who launches the service
-* keytab : URI of the keytab. It supports two modes:
-    * URI starts with `hdfs://`: The URI where the keytab is stored on hdfs. The keytab will be localized to each node by YARN.
-    * URI starts with `file://`: The URI where the keytab is stored on local host. It is assumed that admin pre-installs the keytabs on the local host before AM launches.
+* keytab : URI of the keytab. Currently supports only files present on the bare host.
+    * URI starts with `file://` - A path on the local host where the keytab is stored. It is assumed that admin pre-installs the keytabs on the local host before AM launches.
+
 # Run with Docker
 The above example is only for a non-docker container based service. YARN Service Framework also provides first-class support for managing docker based services.
 Most of the steps for managing docker based services are the same except that in docker the `Artifact` type for a component is `DOCKER` and the Artifact `id` is the name of the docker image.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24a5ccbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
index e6f8493..496c1a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
@@ -241,7 +241,7 @@ The state of the component
 
 |Name|Description|Required|Schema|Default|
 |----|----|----|----|----|
-|state|enum of the state of the component|false|enum (FLEXING, STABLE)||
+|state|enum of the state of the component|false|enum (INIT, FLEXING, STABLE, UPGRADING)||
 
 
 ### ConfigFile
@@ -262,7 +262,7 @@ Set of configuration properties that can be injected into the service components
 
 |Name|Description|Required|Schema|Default|
 |----|----|----|----|----|
-|properties|A blob of key-value pairs for configuring YARN service AM.|false|object||
+|properties|A blob of key-value pairs for configuring the YARN service AM.|false|object||
 |env|A blob of key-value pairs which will be appended to the default system properties and handed off to the service at start time. All placeholder references to properties will be substituted before injection.|false|object||
 |files|Array of list of files that needs to be created and made available as volumes in the service component containers.|false|ConfigFile array||
 
@@ -300,8 +300,8 @@ The kerberos principal info of the user who launches the service.
 
 |Name|Description|Required|Schema|Default|
 |----|----|----|----|----|
-|principal_name|The principal name of the user who launches the service.|false|string||
-|keytab|The URI of the kerberos keytab. It supports two modes, URI starts with "hdfs://": A path on hdfs where the keytab is stored. The keytab will be localized by YARN to each host; URI starts with "file://": A path on the local host where the keytab is stored. It is assumed that the keytabs are pre-installed by admins before AM launches.|false|string||
+|principal_name|The principal name of the user who launches the service. Note that `_HOST` is required in the `principal_name` field such as `testuser/_HOST@EXAMPLE.COM` because Hadoop client validates that the server's (in this case, the AM's) principal has hostname present when communicating to the server.|false|string||
+|keytab|The URI of the kerberos keytab. Currently supports only files present on the bare host. URI starts with "file://" - A path on the local host where the keytab is stored. It is assumed that admin pre-installs the keytabs on the local host before AM launches.|false|string||
 
 
 ### PlacementConstraint
@@ -369,7 +369,7 @@ Resource determines the amount of resources (vcores, memory, network, etc.) usab
 |profile|Each resource profile has a unique id which is associated with a cluster-level predefined memory, cpus, etc.|false|string||
 |cpus|Amount of vcores allocated to each container (optional but overrides cpus in profile if specified).|false|integer (int32)||
 |memory|Amount of memory allocated to each container (optional but overrides memory in profile if specified). Currently accepts only an integer value and default unit is in MB.|false|string||
-|additional|A map of resource type name to resource type information. Including value (integer), and unit (string). This will be used to specify resource other than cpu and memory. Please refer to example below. |  false | object ||
+|additional|A map of resource type name to resource type information. Including value (integer), and unit (string). This will be used to specify resource other than cpu and memory. Please refer to example below.|false|object||
 
 
 ### ResourceInformation
@@ -402,16 +402,17 @@ a service resource has the following attributes.
 |state|State of the service. Specifying a value for this attribute for the PUT payload means update the service to this desired state.|false|ServiceState||
 |quicklinks|A blob of key-value pairs of quicklinks to be exported for a service.|false|object||
 |queue|The YARN queue that this service should be submitted to.|false|string||
-|kerberos_principal | The principal info of the user who launches the service|false|KerberosPrincipal||
+|kerberos_principal|The principal info of the user who launches the service|false|KerberosPrincipal||
 |docker_client_config|URI of the file containing the docker client configuration (e.g. hdfs:///tmp/config.json)|false|string||
 
+
 ### ServiceState
 
 The current state of a service.
 
 |Name|Description|Required|Schema|Default|
 |----|----|----|----|----|
-|state|enum of the state of the service|false|enum (ACCEPTED, STARTED, STABLE, STOPPED, FAILED, FLEX)||
+|state|enum of the state of the service|false|enum (ACCEPTED, STARTED, STABLE, STOPPED, FAILED, FLEX, UPGRADING)||
 
 
 ### ServiceStatus
@@ -472,7 +473,6 @@ Note, lifetime value of -1 means unlimited lifetime.
     "components": [
         {
             "name": "hello",
-            "dependencies": [],
             "state": "STABLE",
             "resource": {
                 "cpus": 1,
@@ -536,7 +536,7 @@ PUT URL - http://localhost:8088/app/v1/services/hello-world
 ##### PUT Request JSON
 ```json
 {
-    "state": "STOPPED"
+  "state": "STOPPED"
 }
 ```
 
@@ -546,17 +546,34 @@ PUT URL - http://localhost:8088/app/v1/services/hello-world
 ##### PUT Request JSON
 ```json
 {
-    "state": "STARTED"
+  "state": "STARTED"
 }
 ```
 
-### Update to flex up/down the no of containers (instances) of a component of a service
+### Update to flex up/down the number of containers (instances) of a component of a service
 PUT URL - http://localhost:8088/app/v1/services/hello-world/components/hello
 
 ##### PUT Request JSON
 ```json
 {
-    "number_of_containers": 3
+  "number_of_containers": 3
+}
+```
+
+Alternatively, you can specify the entire "components" section instead.
+
+PUT URL - http://localhost:8088/app/v1/services/hello-world
+##### PUT Request JSON
+```json
+{
+  "state": "FLEX",
+  "components" :
+    [
+      {
+        "name": "hello",
+        "number_of_containers": 3
+      }
+    ]
 }
 ```
 
@@ -727,6 +744,14 @@ POST URL - http://localhost:8088/app/v1/services
             {
               "type": "ANTI_AFFINITY",
               "scope": "NODE",
+              "node_attributes": {
+                "os": ["centos6", "centos7"],
+                "fault_domain": ["fd1", "fd2"]
+              },
+              "node_partitions": [
+                "gpu",
+                "fast-disk"
+              ],
               "target_tags": [
                 "hello"
               ]
@@ -756,7 +781,6 @@ fulfilled and the service will be in non-STABLE state.
     "components": [
         {
             "name": "hello",
-            "dependencies": [],
             "state": "STABLE",
             "resource": {
                 "cpus": 1,
@@ -767,8 +791,14 @@ fulfilled and the service will be in non-STABLE state.
                 {
                   "type": "ANTI_AFFINITY",
                   "scope": "NODE",
-                  "node_attributes": {},
-                  "node_partitions": [],
+                  "node_attributes": {
+                    "os": ["centos6", "centos7"],
+                    "fault_domain": ["fd1", "fd2"]
+                  },
+                  "node_partitions": [
+                    "gpu",
+                    "fast-disk"
+                  ],
                   "target_tags": [
                     "hello"
                   ]
@@ -858,3 +888,4 @@ POST URL - http://localhost:8088/app/v1/services
     ]
 }
 ```
+




[40/50] [abbrv] hadoop git commit: YARN-8217. RmAuthenticationFilterInitializer and TimelineAuthenticationFilterInitializer should use Configuration.getPropsWithPrefix instead of iterator. Contributed by Suma Shivaprasad.

Posted by xk...@apache.org.
YARN-8217. RmAuthenticationFilterInitializer and TimelineAuthenticationFilterInitializer should use Configuration.getPropsWithPrefix instead of iterator. Contributed by Suma Shivaprasad.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee2ce923
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee2ce923
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee2ce923

Branch: refs/heads/HDFS-12943
Commit: ee2ce923a922bfc3e89ad6f0f6a25e776fe91ffb
Parents: 85381c7
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Thu May 3 10:01:02 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Thu May 3 14:43:40 2018 +0530

----------------------------------------------------------------------
 .../http/RMAuthenticationFilterInitializer.java | 51 ++----------
 ...TimelineAuthenticationFilterInitializer.java | 47 +++---------
 .../security/TestRMAuthenticationFilter.java    | 81 ++++++++++++++++++++
 3 files changed, 98 insertions(+), 81 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee2ce923/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java
index 9fc1334..d0cde9e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java
@@ -18,23 +18,13 @@
 
 package org.apache.hadoop.yarn.server.security.http;
 
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.Reader;
-import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterInitializer;
-import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
 import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
@@ -43,48 +33,23 @@ import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
 public class RMAuthenticationFilterInitializer extends FilterInitializer {
 
   String configPrefix;
-  String kerberosPrincipalProperty;
-  String cookiePath;
 
   public RMAuthenticationFilterInitializer() {
     this.configPrefix = "hadoop.http.authentication.";
-    this.kerberosPrincipalProperty = KerberosAuthenticationHandler.PRINCIPAL;
-    this.cookiePath = "/";
   }
 
   protected Map<String, String> createFilterConfig(Configuration conf) {
-    Map<String, String> filterConfig = new HashMap<String, String>();
-
-    // setting the cookie path to root '/' so it is used for all resources.
-    filterConfig.put(AuthenticationFilter.COOKIE_PATH, cookiePath);
+    Map<String, String> filterConfig = AuthenticationFilterInitializer
+        .getFilterConfigMap(conf, configPrefix);
 
     // Before conf object is passed in, RM has already processed it and used RM
     // specific configs to overwrite hadoop common ones. Hence we just need to
     // source hadoop.proxyuser configs here.
-    for (Map.Entry<String, String> entry : conf) {
-      String propName = entry.getKey();
-      if (propName.startsWith(configPrefix)) {
-        String value = conf.get(propName);
-        String name = propName.substring(configPrefix.length());
-        filterConfig.put(name, value);
-      } else if (propName.startsWith(ProxyUsers.CONF_HADOOP_PROXYUSER)) {
-        String value = conf.get(propName);
-        String name = propName.substring("hadoop.".length());
-        filterConfig.put(name, value);
-      }
-    }
 
-    // Resolve _HOST into bind address
-    String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
-    String principal = filterConfig.get(kerberosPrincipalProperty);
-    if (principal != null) {
-      try {
-        principal = SecurityUtil.getServerPrincipal(principal, bindAddress);
-      } catch (IOException ex) {
-        throw new RuntimeException(
-          "Could not resolve Kerberos principal name: " + ex.toString(), ex);
-      }
-      filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL, principal);
+    //Add proxy user configs
+    for (Map.Entry<String, String> entry : conf.
+        getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) {
+      filterConfig.put("proxyuser" + entry.getKey(), entry.getValue());
     }
 
     filterConfig.put(DelegationTokenAuthenticationHandler.TOKEN_KIND,
@@ -95,10 +60,8 @@ public class RMAuthenticationFilterInitializer extends FilterInitializer {
 
   @Override
   public void initFilter(FilterContainer container, Configuration conf) {
-
     Map<String, String> filterConfig = createFilterConfig(conf);
     container.addFilter("RMAuthenticationFilter",
       RMAuthenticationFilter.class.getName(), filterConfig);
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee2ce923/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
index 3d8ce05..96c3cdf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
@@ -22,8 +22,7 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterInitializer;
-import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
@@ -33,7 +32,6 @@ import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAu
 import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticationHandler;
 import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 
-import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -62,42 +60,17 @@ public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
   protected void setAuthFilterConfig(Configuration conf) {
     filterConfig = new HashMap<String, String>();
 
-    // setting the cookie path to root '/' so it is used for all resources.
-    filterConfig.put(AuthenticationFilter.COOKIE_PATH, "/");
-
-    for (Map.Entry<String, String> entry : conf) {
-      String name = entry.getKey();
-      if (name.startsWith(ProxyUsers.CONF_HADOOP_PROXYUSER)) {
-        String value = conf.get(name);
-        name = name.substring("hadoop.".length());
-        filterConfig.put(name, value);
-      }
-    }
-    for (Map.Entry<String, String> entry : conf) {
-      String name = entry.getKey();
-      if (name.startsWith(PREFIX)) {
-        // yarn.timeline-service.http-authentication.proxyuser will override
-        // hadoop.proxyuser
-        String value = conf.get(name);
-        name = name.substring(PREFIX.length());
-        filterConfig.put(name, value);
-      }
+    for (Map.Entry<String, String> entry : conf
+        .getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) {
+      filterConfig.put("proxyuser" + entry.getKey(), entry.getValue());
     }
 
-    // Resolve _HOST into bind address
-    String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
-    String principal =
-        filterConfig.get(KerberosAuthenticationHandler.PRINCIPAL);
-    if (principal != null) {
-      try {
-        principal = SecurityUtil.getServerPrincipal(principal, bindAddress);
-      } catch (IOException ex) {
-        throw new RuntimeException("Could not resolve Kerberos principal " +
-            "name: " + ex.toString(), ex);
-      }
-      filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL,
-          principal);
-    }
+    // yarn.timeline-service.http-authentication.proxyuser will override
+    // hadoop.proxyuser
+    Map<String, String> timelineAuthProps =
+        AuthenticationFilterInitializer.getFilterConfigMap(conf, PREFIX);
+
+    filterConfig.putAll(timelineAuthProps);
   }
 
   protected Map<String, String> getFilterConfig() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee2ce923/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMAuthenticationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMAuthenticationFilter.java
new file mode 100644
index 0000000..4190cc6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMAuthenticationFilter.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.security;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.FilterContainer;
+import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilter;
+import org.apache.hadoop.yarn.server.security.http
+    .RMAuthenticationFilterInitializer;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+/**
+ * Test RM Auth filter.
+ */
+public class TestRMAuthenticationFilter {
+
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testConfiguration() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set("hadoop.http.authentication.foo", "bar");
+    conf.set("hadoop.proxyuser.user.foo", "bar1");
+
+    conf.set(HttpServer2.BIND_ADDRESS, "barhost");
+
+    FilterContainer container = Mockito.mock(FilterContainer.class);
+    Mockito.doAnswer(new Answer() {
+      @Override
+      public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
+        Object[] args = invocationOnMock.getArguments();
+
+        assertEquals("RMAuthenticationFilter", args[0]);
+
+        assertEquals(RMAuthenticationFilter.class.getName(), args[1]);
+
+        Map<String, String> conf = (Map<String, String>) args[2];
+        assertEquals("/", conf.get("cookie.path"));
+
+        assertEquals("simple", conf.get("type"));
+        assertEquals("36000", conf.get("token.validity"));
+        assertNull(conf.get("cookie.domain"));
+        assertEquals("true", conf.get("simple.anonymous.allowed"));
+        assertEquals("HTTP/barhost@LOCALHOST", conf.get("kerberos.principal"));
+        assertEquals(System.getProperty("user.home") + "/hadoop.keytab",
+            conf.get("kerberos.keytab"));
+        assertEquals("bar", conf.get("foo"));
+        assertEquals("bar1", conf.get("proxyuser.user.foo"));
+
+        return null;
+      }
+    }).when(container).addFilter(Mockito.<String>anyObject(),
+        Mockito.<String>anyObject(), Mockito.<Map<String, String>>anyObject());
+
+    new RMAuthenticationFilterInitializer().initFilter(container, conf);
+  }
+}
+




[32/50] [abbrv] hadoop git commit: HDFS-11807. libhdfs++: Get minidfscluster tests running under valgrind. Contributed by Anatoli Shein.

Posted by xk...@apache.org.
HDFS-11807. libhdfs++: Get minidfscluster tests running under valgrind.  Contributed by Anatoli Shein.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19ae588f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19ae588f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19ae588f

Branch: refs/heads/HDFS-12943
Commit: 19ae588fde9930c042cdb2848b8a1a0ff514b575
Parents: fe649bb
Author: James Clampffer <jh...@apache.org>
Authored: Wed May 2 11:49:12 2018 -0400
Committer: James Clampffer <jh...@apache.org>
Committed: Wed May 2 11:49:12 2018 -0400

----------------------------------------------------------------------
 .../src/main/native/libhdfs-tests/expect.h      |  60 +++++
 .../libhdfs-tests/test_libhdfs_mini_stress.c    | 253 ++++++++++++++-----
 .../src/main/native/libhdfspp/CMakeLists.txt    |   2 +-
 .../main/native/libhdfspp/tests/CMakeLists.txt  |   6 +
 .../main/native/libhdfspp/tests/memcheck.supp   |  27 ++
 5 files changed, 279 insertions(+), 69 deletions(-)
----------------------------------------------------------------------
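The patch keeps the JVM out of the memcheck run by forking a child that hosts the mini DFS cluster and talking to it over a socketpair; the child is launched via exec of the test binary itself, so valgrind's --trace-children=no never follows it. A standalone sketch of that handshake (hypothetical port value and payload, not the patch itself):

/* Minimal sketch: parent re-execs itself as an untraced child and the two
 * processes exchange a port number over a socketpair, mirroring the test. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
  unsigned short port = 0;
  int fds[2];
  if (argc > 1) {                          /* child: re-exec'd with its fd */
    int fd = (int) strtol(argv[1], NULL, 10);
    port = 9000;                           /* stand-in for the NameNode port */
    if (write(fd, &port, sizeof(port)) != (ssize_t) sizeof(port)) return EXIT_FAILURE;
    if (read(fd, &port, sizeof(port)) != (ssize_t) sizeof(port)) return EXIT_FAILURE;
    return EXIT_SUCCESS;                   /* parent said we can shut down */
  }
  if (socketpair(PF_LOCAL, SOCK_STREAM, 0, fds) != 0) return EXIT_FAILURE;
  pid_t pid = fork();
  if (pid == 0) {                          /* child: exec self so valgrind skips it */
    char ch_fd[12];
    snprintf(ch_fd, sizeof(ch_fd), "%d", fds[1]);
    execl(argv[0], argv[0], ch_fd, (char *) NULL);
    fprintf(stderr, "execl() failed\n");   /* only reached if exec fails */
    return EXIT_FAILURE;
  }
  close(fds[1]);
  if (read(fds[0], &port, sizeof(port)) != (ssize_t) sizeof(port)) return EXIT_FAILURE;
  /* ... the valgrind-traced client work against "port" would go here ... */
  if (write(fds[0], &port, sizeof(port)) != (ssize_t) sizeof(port)) return EXIT_FAILURE;
  int status = 0;
  waitpid(pid, &status, 0);                /* reap the child and check it exited cleanly */
  return status == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}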


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ae588f/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.h
index 528c96f..d843b67 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.h
@@ -132,6 +132,54 @@ struct hdfsFile_internal;
         } \
     } while (0);
 
+#define EXPECT_INT_LT(x, y) \
+    do { \
+        int __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ >= (y)) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+              "code %d (errno: %d): expected less than %d\n", \
+               __FILE__, __LINE__, __my_ret__, __my_errno__, (y)); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_INT_LE(x, y) \
+    do { \
+        int __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ > (y)) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+              "code %d (errno: %d): expected less than or equal %d\n", \
+               __FILE__, __LINE__, __my_ret__, __my_errno__, (y)); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_INT_GT(x, y) \
+    do { \
+        int __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ <= (y)) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+              "code %d (errno: %d): expected greater than %d\n", \
+               __FILE__, __LINE__, __my_ret__, __my_errno__, (y)); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_INT_GE(x, y) \
+    do { \
+        int __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ < (y)) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+              "code %d (errno: %d): expected greater than or equal %d\n", \
+               __FILE__, __LINE__, __my_ret__, __my_errno__, (y)); \
+            return -1; \
+        } \
+    } while (0);
+
 #define EXPECT_INT64_EQ(x, y) \
     do { \
         int64_t __my_ret__ = y; \
@@ -144,6 +192,18 @@ struct hdfsFile_internal;
         } \
     } while (0);
 
+#define ASSERT_INT64_EQ(x, y) \
+    do { \
+        int64_t __my_ret__ = y; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ != (x)) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+              "value %"PRId64" (errno: %d): expected %"PRId64"\n", \
+               __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
+            exit(EXIT_FAILURE); \
+        } \
+    } while (0);
+
 #define EXPECT_UINT64_EQ(x, y) \
     do { \
         uint64_t __my_ret__ = y; \
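The new EXPECT_INT_LT/LE/GT/GE macros follow the existing expect.h convention: on failure they print the offending value and errno and return -1 from the enclosing function (ASSERT_INT64_EQ instead exits the process). A hypothetical caller, assuming expect.h from libhdfs-tests is on the include path:

/* Hypothetical usage, not part of the patch: each EXPECT_* macro returns
 * -1 from the enclosing function when the comparison does not hold. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include "expect.h"

static int checkReadSize(int bytesRead, int bufSize)
{
  EXPECT_INT_GE(bytesRead, 0);        /* a negative count means the read failed */
  EXPECT_INT_LE(bytesRead, bufSize);  /* and the count must fit within the buffer */
  return 0;
}

int main(void)
{
  return checkReadSize(128, 4096) ? EXIT_FAILURE : EXIT_SUCCESS;
}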

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ae588f/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
index dca4782..9054287 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+#include "common/util_c.h"
 #include "expect.h"
 #include "hdfs/hdfs.h"
 #include "hdfspp/hdfs_ext.h"
@@ -24,10 +25,15 @@
 
 #include <errno.h>
 #include <inttypes.h>
+#include <pwd.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
 
 #define TO_STR_HELPER(X) #X
 #define TO_STR(X) TO_STR_HELPER(X)
@@ -44,6 +50,8 @@
 
 #define TLH_DEFAULT_IPC_CLIENT_CONNECT_RETRY_INTERVAL_MS 5
 
+#define MAX_DIGITS_IN_INT 10
+
 #ifndef RANDOM_ERROR_RATIO
 #define RANDOM_ERROR_RATIO 1000000000
 #endif
@@ -62,15 +70,13 @@ struct tlhThreadInfo {
 
 };
 
-static int hdfsNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs,
+static int hdfsNameNodeConnect(tPort port, hdfsFS *fs,
                                const char *username)
 {
   int ret;
-  tPort port;
   hdfsFS hdfs;
   struct hdfsBuilder *bld;
 
-  port = (tPort)nmdGetNameNodePort(cl);
   if (port < 0) {
     fprintf(stderr, "hdfsNameNodeConnect: nmdGetNameNodePort "
             "returned error %d\n", port);
@@ -104,6 +110,44 @@ static int hdfsNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs,
   return 0;
 }
 
+#ifdef VALGRIND
+static int hdfsCurlData(const char *host, const tPort port, const char *dirNm,
+                         const char *fileNm, tSize fileSz)
+{
+  int ret;
+  const char *content;
+  content = fileNm;
+  char tmpFile[14] = "stress_XXXXXX";
+
+  // Retrieve user id (always successful)
+  uid_t uid = geteuid();
+  // Retrieve password struct entry
+  struct passwd *pw;
+  EXPECT_NONNULL(pw = getpwuid(uid));
+
+  int fd = -1;
+  EXPECT_NONNEGATIVE(fd = mkstemp(tmpFile));
+
+  tSize sz = 0;
+  while (sz < fileSz) {
+    EXPECT_NONNEGATIVE(ret = write(fd, content, strlen(content)));
+    sz += ret;
+  }
+
+  int curlSize = 200;
+  char curlStr[curlSize];
+  ret = snprintf(curlStr,curlSize,"curl -L -i -X PUT -T %s \"http://%s:%d/webhdfs/v1%s?op=CREATE&overwrite=true&user.name=%s\"",tmpFile,host,(int)port,fileNm,pw->pw_name);
+  //Check for errors during snprintf
+  EXPECT_NONNEGATIVE(ret);
+  //Check for truncation during snprintf
+  EXPECT_INT_LT(ret, curlSize);
+
+  EXPECT_ZERO(system(curlStr));
+  EXPECT_ZERO(unlink(tmpFile));
+
+  return 0;
+}
+#else
 static int hdfsWriteData(hdfsFS hdfs, const char *dirNm,
                          const char *fileNm, tSize fileSz)
 {
@@ -142,6 +186,7 @@ static int hdfsWriteData(hdfsFS hdfs, const char *dirNm,
   EXPECT_ZERO(hdfsCloseFile(hdfs, file));
   return 0;
 }
+#endif
 
 static int fileEventCallback1(const char * event, const char * cluster, const char * file, int64_t value, int64_t cookie)
 {
@@ -223,6 +268,7 @@ static int doTestHdfsMiniStress(struct tlhThreadInfo *ti, int randomErr)
   fprintf(stderr, "testHdfsMiniStress(threadIdx=%d): finished read loop\n",
           ti->threadIdx);
   EXPECT_ZERO(nErrs);
+  hdfsFreeFileInfo(fileInfo, 1);
   return 0;
 }
 
@@ -275,76 +321,147 @@ static int checkFailures(struct tlhThreadInfo *ti, int tlhNumThreads)
 
 /**
  * Test intended to stress libhdfs client with concurrent requests. Currently focused
- * on concurrent reads.
+ * on concurrent reads. In order to run this test under valgrind and avoid JVM issues
+ * we fork a child process that runs a mini dfs cluster, and the parent process
+ * communicates with it using a socket pair.
  */
-int main(void)
+int main(int argc, char *argv[])
 {
-  int i, tlhNumThreads;
-  char *dirNm, *fileNm;
-  tSize fileSz;
-  const char *tlhNumThreadsStr, *tlhNumDNsStr;
-  hdfsFS hdfs = NULL;
-  struct NativeMiniDfsCluster* tlhCluster;
-  struct tlhThreadInfo ti[TLH_MAX_THREADS];
-  struct NativeMiniDfsConf conf = {
+  tPort port;
+#ifdef VALGRIND
+  int httpPort;
+  char * httpHost;
+  size_t hostSize;
+  int fds[2];
+  static const int parentsocket = 0;
+  static const int childsocket = 1;
+  int status;
+  // If there is an argument, the child code executes and starts a mini dfs cluster
+  if (argc > 1) {
+    // The argument contains child socket
+    fds[childsocket] = (int) strtol(argv[1],NULL,10);
+#endif
+    const char *tlhNumDNsStr;
+    struct NativeMiniDfsCluster* tlhCluster;
+    struct NativeMiniDfsConf conf = {
       1, /* doFormat */
-  };
-
-  dirNm = "/tlhMiniStressData";
-  fileNm = "/tlhMiniStressData/file";
-  fileSz = 2*1024*1024;
-
-  tlhNumDNsStr = getenv("TLH_NUM_DNS");
-  if (!tlhNumDNsStr) {
-    tlhNumDNsStr = "1";
-  }
-  conf.numDataNodes = atoi(tlhNumDNsStr);
-  if ((conf.numDataNodes <= 0) || (conf.numDataNodes > TLH_MAX_DNS)) {
-    fprintf(stderr, "testLibHdfsMiniStress: must have a number of datanodes "
-            "between 1 and %d inclusive, not %d\n",
-            TLH_MAX_DNS, conf.numDataNodes);
-    return EXIT_FAILURE;
-  }
-
-  tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
-  if (!tlhNumThreadsStr) {
-    tlhNumThreadsStr = "8";
-  }
-  tlhNumThreads = atoi(tlhNumThreadsStr);
-  if ((tlhNumThreads <= 0) || (tlhNumThreads > TLH_MAX_THREADS)) {
-    fprintf(stderr, "testLibHdfsMiniStress: must have a number of threads "
-            "between 1 and %d inclusive, not %d\n",
-            TLH_MAX_THREADS, tlhNumThreads);
-    return EXIT_FAILURE;
-  }
-  memset(&ti[0], 0, sizeof(ti));
-  for (i = 0; i < tlhNumThreads; i++) {
-    ti[i].threadIdx = i;
-  }
-
-  tlhCluster = nmdCreate(&conf);
-  EXPECT_NONNULL(tlhCluster);
-  EXPECT_ZERO(nmdWaitClusterUp(tlhCluster));
+      1, /* webhdfs */
+      0, /* webhdfs port */
+      1  /* shortcircuit */
+    };
+    tlhNumDNsStr = getenv("TLH_NUM_DNS");
+    if (!tlhNumDNsStr) {
+      tlhNumDNsStr = "1";
+    }
+    conf.numDataNodes = atoi(tlhNumDNsStr);
+    if ((conf.numDataNodes <= 0) || (conf.numDataNodes > TLH_MAX_DNS)) {
+      fprintf(stderr, "testLibHdfsMiniStress: must have a number of datanodes "
+              "between 1 and %d inclusive, not %d\n",
+              TLH_MAX_DNS, conf.numDataNodes);
+      return EXIT_FAILURE;
+    }
+    tlhCluster = nmdCreate(&conf);
+    EXPECT_NONNULL(tlhCluster);
+    EXPECT_ZERO(nmdWaitClusterUp(tlhCluster));
+    port = (tPort)nmdGetNameNodePort(tlhCluster);
+#ifdef VALGRIND
+    EXPECT_ZERO(nmdGetNameNodeHttpAddress(tlhCluster, &httpPort, (const char **) &httpHost));
+    hostSize = strlen(httpHost) + 1;
+    // The child is sending hdfs port, webhdfs port, hostname size, and hostname to the parent
+    ASSERT_INT64_EQ(write(fds[childsocket], &port, sizeof(tPort)), sizeof(tPort));
+    ASSERT_INT64_EQ(write(fds[childsocket], &httpPort, sizeof(int)), sizeof(int));
+    ASSERT_INT64_EQ(write(fds[childsocket], &hostSize, sizeof(size_t)), sizeof(size_t));
+    ASSERT_INT64_EQ(write(fds[childsocket], httpHost, hostSize), hostSize);
+    free(httpHost);
+    // The child is waiting for the parent to finish and send a message
+    ASSERT_INT64_EQ(read(fds[childsocket], &port, sizeof(tPort)), sizeof(tPort));
+    EXPECT_ZERO(nmdShutdown(tlhCluster));
+    nmdFree(tlhCluster);
+  } else { // If there is no argument, the parent code executes
+#endif
+    hdfsFS hdfs = NULL;
+    int i, tlhNumThreads;
+    char *dirNm, *fileNm;
+    tSize fileSz;
+    const char *tlhNumThreadsStr;
+    struct tlhThreadInfo ti[TLH_MAX_THREADS];
+
+    dirNm = "/tlhMiniStressData";
+    fileNm = "/tlhMiniStressData/file";
+    fileSz = 2*1024*1024;
+
+    tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
+    if (!tlhNumThreadsStr) {
+      tlhNumThreadsStr = "8";
+    }
+    tlhNumThreads = atoi(tlhNumThreadsStr);
+    if ((tlhNumThreads <= 0) || (tlhNumThreads > TLH_MAX_THREADS)) {
+      fprintf(stderr, "testLibHdfsMiniStress: must have a number of threads "
+              "between 1 and %d inclusive, not %d\n",
+              TLH_MAX_THREADS, tlhNumThreads);
+      return EXIT_FAILURE;
+    }
+    memset(&ti[0], 0, sizeof(ti));
+    for (i = 0; i < tlhNumThreads; i++) {
+      ti[i].threadIdx = i;
+    }
 
-  EXPECT_ZERO(hdfsNameNodeConnect(tlhCluster, &hdfs, NULL));
+#ifdef VALGRIND
+    EXPECT_ZERO(socketpair(PF_LOCAL, SOCK_STREAM, 0, fds));
+    // Forking off a child to execute JVM stuff
+    pid_t pid = fork();
+    if(pid == 0){
+      // The child execs this program from the beginning and passes
+      // its file descriptor as a command line argument.
+      char ch_fd[MAX_DIGITS_IN_INT + 1];
+      sprintf(ch_fd, "%d", fds[childsocket]);
+      // This has to be done with exec() to prevent valgrind from tracing the child
+      execl(argv[0], argv[0], ch_fd, NULL);
+      // This code should never execute
+      fprintf(stderr, "execl() failed.\n");
+      return EXIT_FAILURE;
+    }
+    close(fds[childsocket]);
+    // The parent is receiving hdfs port, webhdfs port, hostname size, and hostname from the child
+    ASSERT_INT64_EQ(read(fds[parentsocket], &port, sizeof(tPort)), sizeof(tPort));
+    ASSERT_INT64_EQ(read(fds[parentsocket], &httpPort, sizeof(int)), sizeof(int));
+    ASSERT_INT64_EQ(read(fds[parentsocket], &hostSize, sizeof(size_t)), sizeof(size_t));
+    httpHost = malloc(hostSize);
+    ASSERT_INT64_EQ(read(fds[parentsocket], httpHost, hostSize), hostSize);
+    EXPECT_ZERO(hdfsNameNodeConnect(port, &hdfs, NULL));
+    EXPECT_ZERO(hdfsCurlData(httpHost, httpPort, dirNm, fileNm, fileSz));
+    free(httpHost);
+#else
+    EXPECT_ZERO(hdfsNameNodeConnect(port, &hdfs, NULL));
+    EXPECT_ZERO(hdfsWriteData(hdfs, dirNm, fileNm, fileSz));
+#endif
 
-  // Single threaded writes for now.
-  EXPECT_ZERO(hdfsWriteData(hdfs, dirNm, fileNm, fileSz));
+    // Multi-threaded reads.
+    for (i = 0; i < tlhNumThreads; i++) {
+      ti[i].theThread.start = testHdfsMiniStress;
+      ti[i].theThread.arg = &ti[i];
+      ti[i].hdfs = hdfs;
+      ti[i].fileNm = fileNm;
+      EXPECT_ZERO(threadCreate(&ti[i].theThread));
+    }
+    for (i = 0; i < tlhNumThreads; i++) {
+      EXPECT_ZERO(threadJoin(&ti[i].theThread));
+    }
 
-  // Multi-threaded reads.
-  for (i = 0; i < tlhNumThreads; i++) {
-    ti[i].theThread.start = testHdfsMiniStress;
-    ti[i].theThread.arg = &ti[i];
-    ti[i].hdfs = hdfs;
-    ti[i].fileNm = fileNm;
-    EXPECT_ZERO(threadCreate(&ti[i].theThread));
-  }
-  for (i = 0; i < tlhNumThreads; i++) {
-    EXPECT_ZERO(threadJoin(&ti[i].theThread));
+    EXPECT_ZERO(hdfsDisconnect(hdfs));
+    EXPECT_ZERO(checkFailures(ti, tlhNumThreads));
+#ifdef VALGRIND
+    //Send this message to the child to notify it that it can now shut down
+    ASSERT_INT64_EQ(write(fds[parentsocket], &port, sizeof(tPort)), sizeof(tPort));
+    // Wait for the child to exit and verify it returned EXIT_SUCCESS
+    waitpid(pid, &status, 0);
+    EXPECT_ZERO(status);
   }
-
-  EXPECT_ZERO(hdfsDisconnect(hdfs));
-  EXPECT_ZERO(nmdShutdown(tlhCluster));
-  nmdFree(tlhCluster);
-  return checkFailures(ti, tlhNumThreads);
+#else
+    EXPECT_ZERO(nmdShutdown(tlhCluster));
+    nmdFree(tlhCluster);
+#endif
+  // Clean up static data and prevent valgrind memory leaks
+  ShutdownProtobufLibrary_C();
+  return EXIT_SUCCESS;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ae588f/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
index 94b1b56..63fa80d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
@@ -90,7 +90,7 @@ if (NOT PROTOC_IS_COMPATIBLE)
 endif (NOT PROTOC_IS_COMPATIBLE)
 
 find_program(MEMORYCHECK_COMMAND valgrind HINTS ${VALGRIND_DIR} )
-set(MEMORYCHECK_COMMAND_OPTIONS "--trace-children=yes --leak-check=full --error-exitcode=1")
+set(MEMORYCHECK_COMMAND_OPTIONS "--trace-children=no --leak-check=full --error-exitcode=1 --suppressions=${PROJECT_SOURCE_DIR}/tests/memcheck.supp")
 message(STATUS "valgrind location: ${MEMORYCHECK_COMMAND}")
 
 if (REQUIRE_VALGRIND AND MEMORYCHECK_COMMAND MATCHES "MEMORYCHECK_COMMAND-NOTFOUND" )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ae588f/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt
index 3331935..6157902 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt
@@ -141,6 +141,7 @@ include_directories (
 )
 
 add_library(hdfspp_test_shim_static STATIC hdfs_shim.c libhdfs_wrapper.c libhdfspp_wrapper.cc ${LIBHDFSPP_BINDING_C}/hdfs.cc)
+add_library(hdfspp_test_static STATIC ${LIBHDFSPP_BINDING_C}/hdfs.cc)
 
 # TODO: get all of the mini dfs library bits here in one place
 # add_library(hdfspp_mini_cluster     native_mini_dfs ${JAVA_JVM_LIBRARY} )
@@ -155,6 +156,11 @@ build_libhdfs_test(hdfspp_mini_dfs_smoke hdfspp_test_shim_static ${CMAKE_CURRENT
 link_libhdfs_test (hdfspp_mini_dfs_smoke hdfspp_test_shim_static fs reader rpc proto common connection gmock_main ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} native_mini_dfs ${JAVA_JVM_LIBRARY} ${SASL_LIBRARIES})
 add_libhdfs_test  (hdfspp_mini_dfs_smoke hdfspp_test_shim_static)
 
+build_libhdfs_test(libhdfs_mini_stress_valgrind hdfspp_test_static expect.c test_libhdfs_mini_stress.c ${OS_DIR}/thread.c)
+link_libhdfs_test(libhdfs_mini_stress_valgrind hdfspp_test_static fs reader rpc proto common connection ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} native_mini_dfs ${JAVA_JVM_LIBRARY} ${SASL_LIBRARIES})
+add_memcheck_test(libhdfs_mini_stress_valgrind_hdfspp_test_static libhdfs_mini_stress_valgrind_hdfspp_test_static)
+set_target_properties(libhdfs_mini_stress_valgrind_hdfspp_test_static PROPERTIES COMPILE_DEFINITIONS "VALGRIND")
+
 build_libhdfs_test(libhdfs_mini_stress hdfspp_test_shim_static expect.c test_libhdfs_mini_stress.c ${OS_DIR}/thread.c)
 link_libhdfs_test(libhdfs_mini_stress hdfspp_test_shim_static fs reader rpc proto common connection ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} native_mini_dfs ${JAVA_JVM_LIBRARY} ${SASL_LIBRARIES})
 add_libhdfs_test(libhdfs_mini_stress hdfspp_test_shim_static)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ae588f/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/memcheck.supp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/memcheck.supp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/memcheck.supp
new file mode 100644
index 0000000..cf80d07
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/memcheck.supp
@@ -0,0 +1,27 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{
+   noai6ai_cached suppression
+   Memcheck:Free
+   fun:free
+   fun:__libc_freeres
+   fun:_vgnU_freeres
+   fun:__run_exit_handlers
+   fun:exit
+   ...
+}
\ No newline at end of file




[29/50] [abbrv] hadoop git commit: YARN-6385. Fix checkstyle warnings in TestFileSystemApplicationHistoryStore

Posted by xk...@apache.org.
YARN-6385. Fix checkstyle warnings in TestFileSystemApplicationHistoryStore

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3265b551
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3265b551
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3265b551

Branch: refs/heads/HDFS-12943
Commit: 3265b55119d39ecbda6d75be04a9a1bf59c631f1
Parents: e07156e
Author: Yiqun Lin <yi...@vipshop.com>
Authored: Wed May 2 18:14:02 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed May 2 18:14:02 2018 +0900

----------------------------------------------------------------------
 .../TestFileSystemApplicationHistoryStore.java    | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3265b551/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
index df4adbe..6b068c1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
@@ -272,20 +272,20 @@ public class TestFileSystemApplicationHistoryStore extends
     tearDown();
 
     // Setup file system to inject startup conditions
-    FileSystem fs = spy(new RawLocalFileSystem());
+    FileSystem fileSystem = spy(new RawLocalFileSystem());
     FileStatus fileStatus = Mockito.mock(FileStatus.class);
     doReturn(true).when(fileStatus).isDirectory();
-    doReturn(fileStatus).when(fs).getFileStatus(any(Path.class));
+    doReturn(fileStatus).when(fileSystem).getFileStatus(any(Path.class));
 
     try {
-      initAndStartStore(fs);
+      initAndStartStore(fileSystem);
     } catch (Exception e) {
       Assert.fail("Exception should not be thrown: " + e);
     }
 
     // Make sure that directory creation was not attempted
     verify(fileStatus, never()).isDirectory();
-    verify(fs, times(1)).mkdirs(any(Path.class));
+    verify(fileSystem, times(1)).mkdirs(any(Path.class));
   }
 
   @Test
@@ -294,14 +294,14 @@ public class TestFileSystemApplicationHistoryStore extends
     tearDown();
 
     // Setup file system to inject startup conditions
-    FileSystem fs = spy(new RawLocalFileSystem());
+    FileSystem fileSystem = spy(new RawLocalFileSystem());
     FileStatus fileStatus = Mockito.mock(FileStatus.class);
     doReturn(false).when(fileStatus).isDirectory();
-    doReturn(fileStatus).when(fs).getFileStatus(any(Path.class));
-    doThrow(new IOException()).when(fs).mkdirs(any(Path.class));
+    doReturn(fileStatus).when(fileSystem).getFileStatus(any(Path.class));
+    doThrow(new IOException()).when(fileSystem).mkdirs(any(Path.class));
 
     try {
-      initAndStartStore(fs);
+      initAndStartStore(fileSystem);
       Assert.fail("Exception should have been thrown");
     } catch (Exception e) {
       // Expected failure
@@ -309,6 +309,6 @@ public class TestFileSystemApplicationHistoryStore extends
 
     // Make sure that directory creation was attempted
     verify(fileStatus, never()).isDirectory();
-    verify(fs, times(1)).mkdirs(any(Path.class));
+    verify(fileSystem, times(1)).mkdirs(any(Path.class));
   }
 }




[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-12943

Posted by xk...@apache.org.
Merge branch 'trunk' into HDFS-12943

# Conflicts:
#	hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
#	hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a38fde5d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a38fde5d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a38fde5d

Branch: refs/heads/HDFS-12943
Commit: a38fde5d0a9c0a8d20204c9b546e6f0de58b6a2a
Parents: f8ee212 96c843f
Author: Erik Krogen <xk...@apache.org>
Authored: Fri May 4 12:25:45 2018 -0700
Committer: Erik Krogen <xk...@apache.org>
Committed: Fri May 4 12:25:45 2018 -0700

----------------------------------------------------------------------
 BUILDING.txt                                    |     2 +
 LICENSE.txt                                     |    68 +
 dev-support/bin/dist-layout-stitching           |    22 +-
 dev-support/docker/Dockerfile                   |     3 +
 .../assemblies/hadoop-src-with-hdsl.xml         |    56 +
 .../main/resources/assemblies/hadoop-src.xml    |     2 +
 .../ensure-jars-have-correct-contents.sh        |     6 +
 .../hadoop-client-minicluster/pom.xml           |     7 +
 .../hadoop-client-runtime/pom.xml               |     1 +
 .../src/main/bin/hadoop-functions.sh            |    14 +-
 .../hadoop-common/src/main/conf/hadoop-env.sh   |    17 +
 .../src/main/conf/log4j.properties              |    34 +
 .../org/apache/hadoop/conf/Configuration.java   |    11 +-
 .../crypto/key/kms/KMSClientProvider.java       |   212 +-
 .../crypto/key/kms/KMSDelegationToken.java      |    22 +-
 .../crypto/key/kms/KMSLegacyTokenRenewer.java   |    56 +
 .../hadoop/crypto/key/kms/KMSTokenRenewer.java  |   103 +
 .../hadoop/crypto/key/kms/package-info.java     |    18 +
 .../apache/hadoop/fs/ChecksumFileSystem.java    |     9 +-
 .../hadoop/fs/CommonConfigurationKeys.java      |     4 +
 .../fs/CommonConfigurationKeysPublic.java       |    10 +
 .../hadoop/fs/CompositeCrcFileChecksum.java     |    82 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |     2 +-
 .../main/java/org/apache/hadoop/fs/Options.java |    11 +
 .../org/apache/hadoop/fs/shell/Command.java     |    69 +-
 .../apache/hadoop/fs/shell/CopyCommands.java    |     6 +
 .../java/org/apache/hadoop/fs/shell/Ls.java     |    26 +-
 .../org/apache/hadoop/fs/shell/PathData.java    |    27 +
 .../main/java/org/apache/hadoop/ipc/Client.java |    16 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java    |     5 +-
 .../main/java/org/apache/hadoop/ipc/RPC.java    |    46 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |    11 +-
 .../apache/hadoop/ipc/WritableRpcEngine.java    |     2 +-
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   |    11 +-
 .../hadoop/metrics2/impl/MetricsConfig.java     |    50 +-
 .../java/org/apache/hadoop/net/NetUtils.java    |    16 +
 .../AuthenticationFilterInitializer.java        |    10 +-
 .../hadoop/security/UserGroupInformation.java   |    10 +-
 .../web/DelegationTokenAuthenticatedURL.java    |    21 +-
 .../DelegationTokenAuthenticationHandler.java   |     8 +-
 .../web/DelegationTokenAuthenticator.java       |     2 +-
 .../hadoop/service/launcher/IrqHandler.java     |     2 +-
 .../java/org/apache/hadoop/util/ConfTest.java   |    10 +-
 .../org/apache/hadoop/util/CrcComposer.java     |   187 +
 .../java/org/apache/hadoop/util/CrcUtil.java    |   220 +
 .../org/apache/hadoop/util/DataChecksum.java    |    18 +
 .../hadoop/util/GenericOptionsParser.java       |     3 +
 .../java/org/apache/hadoop/util/KMSUtil.java    |    45 +-
 .../hadoop/util/KMSUtilFaultInjector.java       |    49 +
 .../hadoop/util/concurrent/HadoopExecutors.java |    34 +-
 ...apache.hadoop.security.token.TokenIdentifier |     1 +
 ...rg.apache.hadoop.security.token.TokenRenewer |     3 +-
 .../src/main/resources/core-default.xml         |    41 +
 .../src/site/markdown/CommandsManual.md         |    17 +
 .../src/site/markdown/HttpAuthentication.md     |     2 +-
 .../markdown/release/3.0.2/CHANGES.3.0.2.md     |    31 +
 .../release/3.0.2/RELEASENOTES.3.0.2.md         |    31 +
 .../markdown/release/3.1.0/CHANGES.3.1.0.md     |  1022 +
 .../release/3.1.0/RELEASENOTES.3.1.0.md         |   199 +
 .../conf/TestCommonConfigurationFields.java     |     3 +
 .../apache/hadoop/conf/TestConfiguration.java   |    26 +-
 ...yptoStreamsWithOpensslAesCtrCryptoCodec.java |     2 +-
 .../crypto/key/kms/TestKMSClientProvider.java   |   162 +
 .../kms/TestLoadBalancingKMSClientProvider.java |    67 +-
 .../apache/hadoop/fs/TestLocalFileSystem.java   |     2 +-
 .../fs/contract/AbstractContractCreateTest.java |    12 +-
 .../apache/hadoop/fs/shell/find/TestFind.java   |    34 +-
 .../org/apache/hadoop/http/TestHttpServer.java  |     2 +-
 .../java/org/apache/hadoop/io/TestIOUtils.java  |     2 +-
 .../java/org/apache/hadoop/ipc/TestIPC.java     |     2 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java     |    58 +-
 .../metrics2/impl/TestGraphiteMetrics.java      |     2 +-
 .../hadoop/metrics2/impl/TestStatsDMetrics.java |     2 +-
 .../sink/RollingFileSystemSinkTestBase.java     |     3 +-
 .../org/apache/hadoop/net/TestNetUtils.java     |     8 +
 .../java/org/apache/hadoop/test/Whitebox.java   |   113 +
 .../org/apache/hadoop/util/TestCrcComposer.java |   242 +
 .../org/apache/hadoop/util/TestCrcUtil.java     |   232 +
 .../org/apache/hadoop/util/TestKMSUtil.java     |    65 +
 .../hadoop/crypto/key/kms/server/TestKMS.java   |   521 +-
 .../crypto/key/kms/server/TestKMSAudit.java     |     2 +-
 hadoop-common-project/hadoop-nfs/pom.xml        |     2 +
 .../org/apache/hadoop/portmap/TestPortmap.java  |     2 +-
 hadoop-dist/pom.xml                             |    83 +
 hadoop-dist/src/main/compose/ozone/.env         |    17 +
 .../src/main/compose/ozone/docker-compose.yaml  |    61 +
 .../src/main/compose/ozone/docker-config        |    35 +
 hadoop-hdds/client/pom.xml                      |    49 +
 .../apache/hadoop/hdds/scm/XceiverClient.java   |   192 +
 .../hadoop/hdds/scm/XceiverClientHandler.java   |   202 +
 .../hdds/scm/XceiverClientInitializer.java      |    72 +
 .../hadoop/hdds/scm/XceiverClientManager.java   |   218 +
 .../hadoop/hdds/scm/XceiverClientMetrics.java   |    92 +
 .../hadoop/hdds/scm/XceiverClientRatis.java     |   266 +
 .../scm/client/ContainerOperationClient.java    |   407 +
 .../hadoop/hdds/scm/client/HddsClientUtils.java |   232 +
 .../hadoop/hdds/scm/client/package-info.java    |    23 +
 .../apache/hadoop/hdds/scm/package-info.java    |    23 +
 .../hdds/scm/storage/ChunkInputStream.java      |   261 +
 .../hdds/scm/storage/ChunkOutputStream.java     |   227 +
 .../hadoop/hdds/scm/storage/package-info.java   |    23 +
 .../common/dev-support/findbugsExcludeFile.xml  |    21 +
 hadoop-hdds/common/pom.xml                      |   128 +
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |    23 +
 .../java/org/apache/hadoop/hdds/HddsUtils.java  |   318 +
 .../apache/hadoop/hdds/client/OzoneQuota.java   |   203 +
 .../hadoop/hdds/client/ReplicationFactor.java   |    63 +
 .../hadoop/hdds/client/ReplicationType.java     |    28 +
 .../apache/hadoop/hdds/client/package-info.java |    23 +
 .../hadoop/hdds/conf/HddsConfServlet.java       |   182 +
 .../hadoop/hdds/conf/OzoneConfiguration.java    |   162 +
 .../apache/hadoop/hdds/conf/package-info.java   |    18 +
 .../org/apache/hadoop/hdds/package-info.java    |    23 +
 .../hadoop/hdds/protocol/DatanodeDetails.java   |   353 +
 .../hadoop/hdds/protocol/package-info.java      |    22 +
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   271 +
 .../org/apache/hadoop/hdds/scm/ScmInfo.java     |    81 +
 .../hadoop/hdds/scm/XceiverClientSpi.java       |   129 +
 .../hadoop/hdds/scm/client/ScmClient.java       |   139 +
 .../hadoop/hdds/scm/client/package-info.java    |    24 +
 .../hadoop/hdds/scm/container/ContainerID.java  |    97 +
 .../common/helpers/AllocatedBlock.java          |    77 +
 .../container/common/helpers/ContainerInfo.java |   333 +
 .../common/helpers/DeleteBlockResult.java       |    51 +
 .../scm/container/common/helpers/Pipeline.java  |   253 +
 .../common/helpers/PipelineChannel.java         |   122 +
 .../helpers/StorageContainerException.java      |   104 +
 .../container/common/helpers/package-info.java  |    22 +
 .../hadoop/hdds/scm/container/package-info.java |    18 +
 .../apache/hadoop/hdds/scm/package-info.java    |    24 +
 .../hdds/scm/protocol/LocatedContainer.java     |   127 +
 .../scm/protocol/ScmBlockLocationProtocol.java  |    72 +
 .../hdds/scm/protocol/ScmLocatedBlock.java      |   100 +
 .../StorageContainerLocationProtocol.java       |   124 +
 .../hadoop/hdds/scm/protocol/package-info.java  |    19 +
 ...kLocationProtocolClientSideTranslatorPB.java |   215 +
 .../protocolPB/ScmBlockLocationProtocolPB.java  |    35 +
 ...rLocationProtocolClientSideTranslatorPB.java |   316 +
 .../StorageContainerLocationProtocolPB.java     |    36 +
 .../hdds/scm/protocolPB/package-info.java       |    24 +
 .../scm/storage/ContainerProtocolCalls.java     |   396 +
 .../hadoop/hdds/scm/storage/package-info.java   |    23 +
 .../java/org/apache/hadoop/ozone/OzoneAcl.java  |   231 +
 .../apache/hadoop/ozone/OzoneConfigKeys.java    |   241 +
 .../org/apache/hadoop/ozone/OzoneConsts.java    |   167 +
 .../apache/hadoop/ozone/common/BlockGroup.java  |    87 +
 .../ozone/common/DeleteBlockGroupResult.java    |    96 +
 .../InconsistentStorageStateException.java      |    51 +
 .../org/apache/hadoop/ozone/common/Storage.java |   248 +
 .../apache/hadoop/ozone/common/StorageInfo.java |   183 +
 .../hadoop/ozone/common/package-info.java       |    18 +
 .../InvalidStateTransitionException.java        |    42 +
 .../ozone/common/statemachine/StateMachine.java |    68 +
 .../ozone/common/statemachine/package-info.java |    21 +
 .../container/common/helpers/ChunkInfo.java     |   185 +
 .../ozone/container/common/helpers/KeyData.java |   170 +
 .../container/common/helpers/package-info.java  |    23 +
 .../org/apache/hadoop/ozone/lease/Lease.java    |   189 +
 .../ozone/lease/LeaseAlreadyExistException.java |    46 +
 .../ozone/lease/LeaseCallbackExecutor.java      |    65 +
 .../hadoop/ozone/lease/LeaseException.java      |    45 +
 .../ozone/lease/LeaseExpiredException.java      |    45 +
 .../apache/hadoop/ozone/lease/LeaseManager.java |   247 +
 .../lease/LeaseManagerNotRunningException.java  |    45 +
 .../ozone/lease/LeaseNotFoundException.java     |    46 +
 .../apache/hadoop/ozone/lease/package-info.java |    26 +
 .../org/apache/hadoop/ozone/package-info.java   |    35 +
 ...kLocationProtocolServerSideTranslatorPB.java |   170 +
 ...rLocationProtocolServerSideTranslatorPB.java |   212 +
 .../hadoop/ozone/protocolPB/package-info.java   |    24 +
 .../hadoop/ozone/web/utils/JsonUtils.java       |    71 +
 .../hadoop/ozone/web/utils/package-info.java    |    19 +
 .../apache/hadoop/utils/BackgroundService.java  |   161 +
 .../org/apache/hadoop/utils/BackgroundTask.java |    28 +
 .../hadoop/utils/BackgroundTaskQueue.java       |    64 +
 .../hadoop/utils/BackgroundTaskResult.java      |    44 +
 .../org/apache/hadoop/utils/BatchOperation.java |    90 +
 .../org/apache/hadoop/utils/EntryConsumer.java  |    38 +
 .../org/apache/hadoop/utils/LevelDBStore.java   |   380 +
 .../apache/hadoop/utils/MetadataKeyFilters.java |   118 +
 .../org/apache/hadoop/utils/MetadataStore.java  |   172 +
 .../hadoop/utils/MetadataStoreBuilder.java      |   126 +
 .../org/apache/hadoop/utils/RocksDBStore.java   |   382 +
 .../apache/hadoop/utils/RocksDBStoreMBean.java  |   144 +
 .../org/apache/hadoop/utils/package-info.java   |    18 +
 .../main/java/org/apache/ratis/RatisHelper.java |   129 +
 .../java/org/apache/ratis/package-info.java     |    22 +
 .../com/google/protobuf/ShadedProtoUtil.java    |    38 +
 .../com/google/protobuf/package-info.java       |    22 +
 .../main/proto/DatanodeContainerProtocol.proto  |   415 +
 .../main/proto/ScmBlockLocationProtocol.proto   |   166 +
 .../StorageContainerLocationProtocol.proto      |   214 +
 hadoop-hdds/common/src/main/proto/hdds.proto    |   170 +
 .../common/src/main/resources/ozone-default.xml |  1049 +
 .../apache/hadoop/hdds/scm/package-info.java    |    21 +
 .../apache/hadoop/ozone/TestMetadataStore.java  |   414 +
 .../org/apache/hadoop/ozone/TestOzoneAcls.java  |   141 +
 .../hadoop/ozone/common/TestStateMachine.java   |   106 +
 .../hadoop/ozone/lease/TestLeaseManager.java    |   374 +
 .../apache/hadoop/ozone/lease/package-info.java |    21 +
 .../org/apache/hadoop/ozone/package-info.java   |    21 +
 .../hadoop/utils/TestRocksDBStoreMBean.java     |    87 +
 .../dev-support/findbugsExcludeFile.xml         |    21 +
 hadoop-hdds/container-service/pom.xml           |   110 +
 .../apache/hadoop/hdds/scm/HddsServerUtil.java  |   325 +
 .../org/apache/hadoop/hdds/scm/VersionInfo.java |    81 +
 .../apache/hadoop/hdds/scm/package-info.java    |    19 +
 .../hadoop/ozone/HddsDatanodeService.java       |   242 +
 .../container/common/helpers/ChunkUtils.java    |   346 +
 .../container/common/helpers/ContainerData.java |   326 +
 .../common/helpers/ContainerMetrics.java        |   121 +
 .../common/helpers/ContainerReport.java         |   218 +
 .../common/helpers/ContainerUtils.java          |   442 +
 .../helpers/DeletedContainerBlocksSummary.java  |   103 +
 .../container/common/helpers/FileUtils.java     |    81 +
 .../container/common/helpers/KeyUtils.java      |   148 +
 .../container/common/helpers/package-info.java  |    22 +
 .../container/common/impl/ChunkManagerImpl.java |   235 +
 .../impl/ContainerLocationManagerImpl.java      |   150 +
 .../common/impl/ContainerManagerImpl.java       |  1113 +
 .../common/impl/ContainerReportManagerImpl.java |    90 +
 .../container/common/impl/ContainerStatus.java  |   217 +
 .../common/impl/ContainerStorageLocation.java   |   203 +
 .../ozone/container/common/impl/Dispatcher.java |   713 +
 .../container/common/impl/KeyManagerImpl.java   |   202 +
 .../RandomContainerDeletionChoosingPolicy.java  |    70 +
 .../common/impl/StorageLocationReport.java      |    63 +
 ...NOrderedContainerDeletionChoosingPolicy.java |    91 +
 .../container/common/impl/package-info.java     |    22 +
 .../common/interfaces/ChunkManager.java         |    76 +
 .../ContainerDeletionChoosingPolicy.java        |    46 +
 .../common/interfaces/ContainerDispatcher.java  |    51 +
 .../interfaces/ContainerLocationManager.java    |    58 +
 .../ContainerLocationManagerMXBean.java         |    36 +
 .../common/interfaces/ContainerManager.java     |   280 +
 .../interfaces/ContainerReportManager.java      |    32 +
 .../container/common/interfaces/KeyManager.java |    76 +
 .../common/interfaces/package-info.java         |    20 +
 .../ozone/container/common/package-info.java    |    28 +
 .../statemachine/DatanodeStateMachine.java      |   397 +
 .../statemachine/EndpointStateMachine.java      |   294 +
 .../statemachine/EndpointStateMachineMBean.java |    34 +
 .../statemachine/SCMConnectionManager.java      |   208 +
 .../SCMConnectionManagerMXBean.java             |    27 +
 .../common/statemachine/StateContext.java       |   285 +
 .../background/BlockDeletingService.java        |   239 +
 .../statemachine/background/package-info.java   |    18 +
 .../commandhandler/CloseContainerHandler.java   |   112 +
 .../commandhandler/CommandDispatcher.java       |   177 +
 .../commandhandler/CommandHandler.java          |    59 +
 .../commandhandler/ContainerReportHandler.java  |   114 +
 .../DeleteBlocksCommandHandler.java             |   211 +
 .../commandhandler/package-info.java            |    18 +
 .../common/statemachine/package-info.java       |    28 +
 .../container/common/states/DatanodeState.java  |    55 +
 .../states/datanode/InitDatanodeState.java      |   164 +
 .../states/datanode/RunningDatanodeState.java   |   175 +
 .../common/states/datanode/package-info.java    |    21 +
 .../states/endpoint/HeartbeatEndpointTask.java  |   267 +
 .../states/endpoint/RegisterEndpointTask.java   |   208 +
 .../states/endpoint/VersionEndpointTask.java    |    68 +
 .../common/states/endpoint/package-info.java    |    20 +
 .../container/common/states/package-info.java   |    18 +
 .../common/transport/server/XceiverServer.java  |   130 +
 .../transport/server/XceiverServerHandler.java  |    82 +
 .../server/XceiverServerInitializer.java        |    62 +
 .../transport/server/XceiverServerSpi.java      |    43 +
 .../common/transport/server/package-info.java   |    24 +
 .../server/ratis/ContainerStateMachine.java     |   293 +
 .../server/ratis/XceiverServerRatis.java        |   214 +
 .../transport/server/ratis/package-info.java    |    23 +
 .../container/common/utils/ContainerCache.java  |   168 +
 .../container/common/utils/package-info.java    |    18 +
 .../container/ozoneimpl/OzoneContainer.java     |   277 +
 .../ozone/container/ozoneimpl/package-info.java |    21 +
 .../org/apache/hadoop/ozone/package-info.java   |    23 +
 .../StorageContainerDatanodeProtocol.java       |    96 +
 .../protocol/StorageContainerNodeProtocol.java  |    68 +
 .../hadoop/ozone/protocol/VersionResponse.java  |   150 +
 .../commands/CloseContainerCommand.java         |    76 +
 .../protocol/commands/DeleteBlocksCommand.java  |    66 +
 .../protocol/commands/RegisteredCommand.java    |   229 +
 .../protocol/commands/ReregisterCommand.java    |    59 +
 .../ozone/protocol/commands/SCMCommand.java     |    41 +
 .../protocol/commands/SendContainerCommand.java |    80 +
 .../ozone/protocol/commands/package-info.java   |    21 +
 .../hadoop/ozone/protocol/package-info.java     |    23 +
 ...rDatanodeProtocolClientSideTranslatorPB.java |   204 +
 .../StorageContainerDatanodeProtocolPB.java     |    34 +
 ...rDatanodeProtocolServerSideTranslatorPB.java |   119 +
 .../hadoop/ozone/protocolPB/package-info.java   |    19 +
 .../StorageContainerDatanodeProtocol.proto      |   353 +
 ...m.sun.jersey.spi.container.ContainerProvider |    16 +
 .../container/common/ContainerTestUtils.java    |    68 +
 .../ozone/container/common/SCMTestUtils.java    |   120 +
 .../ozone/container/common/ScmTestMock.java     |   274 +
 .../common/TestDatanodeStateMachine.java        |   379 +
 .../testutils/BlockDeletingServiceTestImpl.java |   104 +
 .../ozone/container/testutils/package-info.java |    18 +
 .../src/test/resources/log4j.properties         |    23 +
 hadoop-hdds/framework/README.md                 |    24 +
 hadoop-hdds/framework/pom.xml                   |    70 +
 .../hadoop/hdds/server/BaseHttpServer.java      |   222 +
 .../apache/hadoop/hdds/server/ServerUtils.java  |   139 +
 .../hadoop/hdds/server/ServiceRuntimeInfo.java  |    64 +
 .../hdds/server/ServiceRuntimeInfoImpl.java     |    55 +
 .../apache/hadoop/hdds/server/package-info.java |    23 +
 .../src/main/resources/webapps/datanode/dn.js   |    92 +
 .../webapps/static/angular-1.6.4.min.js         |   332 +
 .../webapps/static/angular-nvd3-1.0.9.min.js    |     1 +
 .../webapps/static/angular-route-1.6.4.min.js   |    17 +
 .../resources/webapps/static/d3-3.5.17.min.js   |     5 +
 .../main/resources/webapps/static/dfs-dust.js   |   133 +
 .../resources/webapps/static/nvd3-1.8.5.min.css |     2 +
 .../webapps/static/nvd3-1.8.5.min.css.map       |     1 +
 .../resources/webapps/static/nvd3-1.8.5.min.js  |    11 +
 .../webapps/static/nvd3-1.8.5.min.js.map        |     1 +
 .../src/main/resources/webapps/static/ozone.css |    60 +
 .../src/main/resources/webapps/static/ozone.js  |   387 +
 .../webapps/static/templates/config.html        |    91 +
 .../resources/webapps/static/templates/jvm.html |    26 +
 .../webapps/static/templates/menu.html          |    60 +
 .../webapps/static/templates/overview.html      |    39 +
 .../webapps/static/templates/rpc-metrics.html   |    87 +
 .../hadoop/hdds/server/TestBaseHttpServer.java  |    98 +
 .../framework/src/test/resources/ozone-site.xml |    24 +
 hadoop-hdds/pom.xml                             |   117 +
 hadoop-hdds/server-scm/pom.xml                  |   159 +
 .../hadoop/hdds/scm/block/BlockManager.java     |    86 +
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |   530 +
 .../hdds/scm/block/BlockmanagerMXBean.java      |    30 +
 .../block/DatanodeDeletedBlockTransactions.java |   132 +
 .../hadoop/hdds/scm/block/DeletedBlockLog.java  |   124 +
 .../hdds/scm/block/DeletedBlockLogImpl.java     |   356 +
 .../hdds/scm/block/SCMBlockDeletingService.java |   175 +
 .../hadoop/hdds/scm/block/package-info.java     |    22 +
 .../hdds/scm/container/ContainerMapping.java    |   607 +
 .../scm/container/ContainerStateManager.java    |   456 +
 .../hadoop/hdds/scm/container/Mapping.java      |   106 +
 .../scm/container/closer/ContainerCloser.java   |   192 +
 .../hdds/scm/container/closer/package-info.java |    23 +
 .../hadoop/hdds/scm/container/package-info.java |    22 +
 .../algorithms/ContainerPlacementPolicy.java    |    41 +
 .../placement/algorithms/SCMCommonPolicy.java   |   197 +
 .../SCMContainerPlacementCapacity.java          |   133 +
 .../algorithms/SCMContainerPlacementRandom.java |    89 +
 .../placement/algorithms/package-info.java      |    18 +
 .../placement/metrics/ContainerStat.java        |   165 +
 .../placement/metrics/DatanodeMetric.java       |    91 +
 .../container/placement/metrics/LongMetric.java |   162 +
 .../container/placement/metrics/NodeStat.java   |    67 +
 .../container/placement/metrics/SCMMetrics.java |   154 +
 .../placement/metrics/SCMNodeMetric.java        |   223 +
 .../placement/metrics/SCMNodeStat.java          |   139 +
 .../placement/metrics/package-info.java         |    20 +
 .../scm/container/placement/package-info.java   |    19 +
 .../replication/ContainerSupervisor.java        |   343 +
 .../container/replication/InProgressPool.java   |   313 +
 .../scm/container/replication/PeriodicPool.java |   119 +
 .../scm/container/replication/package-info.java |    23 +
 .../container/states/ContainerAttribute.java    |   245 +
 .../scm/container/states/ContainerState.java    |    96 +
 .../scm/container/states/ContainerStateMap.java |   405 +
 .../hdds/scm/container/states/package-info.java |    22 +
 .../hdds/scm/exceptions/SCMException.java       |   119 +
 .../hdds/scm/exceptions/package-info.java       |    18 +
 .../hadoop/hdds/scm/node/CommandQueue.java      |   190 +
 .../hdds/scm/node/HeartbeatQueueItem.java       |   117 +
 .../hadoop/hdds/scm/node/NodeManager.java       |   153 +
 .../hadoop/hdds/scm/node/NodeManagerMXBean.java |    57 +
 .../hadoop/hdds/scm/node/NodePoolManager.java   |    71 +
 .../hadoop/hdds/scm/node/SCMNodeManager.java    |   910 +
 .../hdds/scm/node/SCMNodePoolManager.java       |   269 +
 .../hadoop/hdds/scm/node/package-info.java      |    31 +
 .../apache/hadoop/hdds/scm/package-info.java    |    22 +
 .../hdds/scm/pipelines/PipelineManager.java     |   175 +
 .../hdds/scm/pipelines/PipelineSelector.java    |   230 +
 .../hadoop/hdds/scm/pipelines/package-info.java |    38 +
 .../scm/pipelines/ratis/RatisManagerImpl.java   |   158 +
 .../hdds/scm/pipelines/ratis/package-info.java  |    18 +
 .../standalone/StandaloneManagerImpl.java       |   145 +
 .../scm/pipelines/standalone/package-info.java  |    18 +
 .../hadoop/hdds/scm/ratis/package-info.java     |    22 +
 .../hdds/scm/server/SCMBlockProtocolServer.java |   222 +
 .../scm/server/SCMClientProtocolServer.java     |   314 +
 .../scm/server/SCMDatanodeProtocolServer.java   |   350 +
 .../hadoop/hdds/scm/server/SCMMXBean.java       |    50 +
 .../hadoop/hdds/scm/server/SCMStorage.java      |    73 +
 .../scm/server/StorageContainerManager.java     |   722 +
 .../StorageContainerManagerHttpServer.java      |    77 +
 .../hadoop/hdds/scm/server/package-info.java    |    22 +
 .../server-scm/src/main/webapps/scm/index.html  |    76 +
 .../server-scm/src/main/webapps/scm/main.html   |    20 +
 .../src/main/webapps/scm/scm-overview.html      |    60 +
 .../server-scm/src/main/webapps/scm/scm.js      |    54 +
 .../hadoop/hdds/scm/HddsServerUtilTest.java     |   308 +
 .../TestStorageContainerManagerHttpServer.java  |   143 +
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |   109 +
 .../hadoop/hdds/scm/block/TestBlockManager.java |   160 +
 .../hdds/scm/block/TestDeletedBlockLog.java     |   363 +
 .../hdds/scm/container/MockNodeManager.java     |   520 +
 .../scm/container/TestContainerMapping.java     |   333 +
 .../container/closer/TestContainerCloser.java   |   228 +
 .../states/TestContainerAttribute.java          |   143 +
 .../hdds/scm/node/TestContainerPlacement.java   |   176 +
 .../hadoop/hdds/scm/node/TestNodeManager.java   |  1176 +
 .../hdds/scm/node/TestSCMNodePoolManager.java   |   160 +
 .../apache/hadoop/hdds/scm/package-info.java    |    21 +
 .../ozone/container/common/TestEndPoint.java    |   458 +
 .../placement/TestContainerPlacement.java       |   134 +
 .../placement/TestDatanodeMetrics.java          |    59 +
 .../replication/TestContainerSupervisor.java    |   272 +
 .../container/replication/package-info.java     |    18 +
 .../ReplicationDatanodeStateManager.java        |   100 +
 .../testutils/ReplicationNodeManagerMock.java   |   326 +
 .../ReplicationNodePoolManagerMock.java         |   133 +
 .../ozone/container/testutils/package-info.java |    18 +
 hadoop-hdds/tools/pom.xml                       |    65 +
 .../hadoop/hdds/scm/cli/OzoneBaseCLI.java       |    43 +
 .../hdds/scm/cli/OzoneCommandHandler.java       |    87 +
 .../apache/hadoop/hdds/scm/cli/ResultCode.java  |    31 +
 .../org/apache/hadoop/hdds/scm/cli/SCMCLI.java  |   234 +
 .../cli/container/CloseContainerHandler.java    |    83 +
 .../cli/container/ContainerCommandHandler.java  |   129 +
 .../cli/container/CreateContainerHandler.java   |    81 +
 .../cli/container/DeleteContainerHandler.java   |    92 +
 .../scm/cli/container/InfoContainerHandler.java |   114 +
 .../scm/cli/container/ListContainerHandler.java |   121 +
 .../hdds/scm/cli/container/package-info.java    |    19 +
 .../hadoop/hdds/scm/cli/package-info.java       |    19 +
 .../main/java/org/apache/hadoop/fs/Hdfs.java    |     4 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |    56 +-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |     0
 .../java/org/apache/hadoop/hdfs/DFSPacket.java  |     0
 .../hadoop/hdfs/DFSStripedInputStream.java      |     2 +
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |     2 +-
 .../hadoop/hdfs/DistributedFileSystem.java      |     5 +-
 .../apache/hadoop/hdfs/FileChecksumHelper.java  |   365 +-
 .../org/apache/hadoop/hdfs/ReadStatistics.java  |    29 +
 .../org/apache/hadoop/hdfs/StripeReader.java    |     8 +
 .../hdfs/client/HdfsClientConfigKeys.java       |     2 +
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |    27 +
 .../hadoop/hdfs/protocol/AclException.java      |    10 +
 .../hdfs/protocol/BlockChecksumOptions.java     |    54 +
 .../hadoop/hdfs/protocol/BlockChecksumType.java |    30 +
 .../datatransfer/DataTransferProtocol.java      |    12 +-
 .../hdfs/protocol/datatransfer/Sender.java      |    11 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |    44 +
 .../ha/RequestHedgingProxyProvider.java         |    14 +
 .../server/protocol/DataNodeUsageReport.java    |   181 +
 .../protocol/DataNodeUsageReportUtil.java       |   101 +
 .../hdfs/server/protocol/package-info.java      |    26 +
 .../hdfs/shortcircuit/ShortCircuitCache.java    |    11 +-
 .../src/main/proto/datatransfer.proto           |     7 +-
 .../src/main/proto/hdfs.proto                   |    21 +
 .../org/apache/hadoop/hdfs/TestDFSPacket.java   |     0
 .../ha/TestRequestHedgingProxyProvider.java     |    79 +
 .../hdfs/web/TestByteRangeInputStream.java      |     2 +-
 .../apache/hadoop/hdfs/web/TestTokenAspect.java |     2 +-
 .../src/main/native/libhdfs-tests/expect.h      |    60 +
 .../libhdfs-tests/test_libhdfs_mini_stress.c    |   253 +-
 .../src/main/native/libhdfspp/CMakeLists.txt    |     2 +-
 .../native/libhdfspp/include/hdfspp/hdfspp.h    |    53 +-
 .../native/libhdfspp/include/hdfspp/ioservice.h |   140 +
 .../native/libhdfspp/lib/bindings/c/hdfs.cc     |     7 +-
 .../native/libhdfspp/lib/common/CMakeLists.txt  |     2 +-
 .../native/libhdfspp/lib/common/async_stream.h  |    13 +-
 .../libhdfspp/lib/common/continuation/asio.h    |     5 -
 .../libhdfspp/lib/common/hdfs_ioservice.cc      |   146 -
 .../libhdfspp/lib/common/hdfs_ioservice.h       |    79 -
 .../libhdfspp/lib/common/ioservice_impl.cc      |   159 +
 .../libhdfspp/lib/common/ioservice_impl.h       |    76 +
 .../main/native/libhdfspp/lib/common/logging.h  |     3 -
 .../libhdfspp/lib/common/namenode_info.cc       |    15 +-
 .../native/libhdfspp/lib/common/namenode_info.h |     8 +-
 .../main/native/libhdfspp/lib/common/util.cc    |    14 +-
 .../src/main/native/libhdfspp/lib/common/util.h |    25 +-
 .../lib/connection/datanodeconnection.cc        |    27 +-
 .../lib/connection/datanodeconnection.h         |    26 +-
 .../main/native/libhdfspp/lib/fs/filehandle.cc  |    18 +-
 .../main/native/libhdfspp/lib/fs/filehandle.h   |    12 +-
 .../main/native/libhdfspp/lib/fs/filesystem.cc  |    67 +-
 .../main/native/libhdfspp/lib/fs/filesystem.h   |    66 +-
 .../libhdfspp/lib/fs/namenode_operations.h      |     4 +-
 .../native/libhdfspp/lib/reader/block_reader.cc |    18 +-
 .../native/libhdfspp/lib/reader/block_reader.h  |    10 +-
 .../native/libhdfspp/lib/reader/datatransfer.h  |     4 +-
 .../libhdfspp/lib/rpc/namenode_tracker.cc       |     2 +-
 .../native/libhdfspp/lib/rpc/namenode_tracker.h |     4 +-
 .../main/native/libhdfspp/lib/rpc/request.cc    |     5 +-
 .../native/libhdfspp/lib/rpc/rpc_connection.h   |     2 +-
 .../libhdfspp/lib/rpc/rpc_connection_impl.cc    |    32 +-
 .../libhdfspp/lib/rpc/rpc_connection_impl.h     |     9 +-
 .../main/native/libhdfspp/lib/rpc/rpc_engine.cc |    14 +-
 .../main/native/libhdfspp/lib/rpc/rpc_engine.h  |     9 +-
 .../main/native/libhdfspp/tests/CMakeLists.txt  |     6 +
 .../native/libhdfspp/tests/bad_datanode_test.cc |    31 +-
 .../libhdfspp/tests/hdfs_ioservice_test.cc      |    10 +-
 .../main/native/libhdfspp/tests/memcheck.supp   |    27 +
 .../native/libhdfspp/tests/mock_connection.h    |     4 +-
 .../libhdfspp/tests/remote_block_reader_test.cc |     4 +-
 .../native/libhdfspp/tests/rpc_engine_test.cc   |   112 +-
 ...uterAdminProtocolServerSideTranslatorPB.java |    67 +
 .../RouterAdminProtocolTranslatorPB.java        |    64 +-
 .../federation/metrics/FederationMetrics.java   |     4 +-
 .../federation/metrics/FederationRPCMBean.java  |     2 +
 .../metrics/FederationRPCMetrics.java           |    10 +
 .../FederationRPCPerformanceMonitor.java        |     5 +
 .../federation/metrics/NamenodeBeanMetrics.java |     3 +
 .../resolver/ActiveNamenodeResolver.java        |     8 +
 .../FederationNamenodeServiceState.java         |     3 +-
 .../resolver/FederationNamespaceInfo.java       |     5 +
 .../resolver/MembershipNamenodeResolver.java    |   110 +-
 .../federation/resolver/MountTableResolver.java |     4 +-
 .../federation/resolver/RemoteLocation.java     |    35 +-
 .../resolver/order/LocalResolver.java           |     3 +-
 .../federation/router/NameserviceManager.java   |    51 +
 .../server/federation/router/RBFConfigKeys.java |     3 +
 .../router/RemoteLocationContext.java           |     7 +
 .../federation/router/RouterAdminServer.java    |    87 +-
 .../server/federation/router/RouterClient.java  |     4 +
 .../router/RouterHeartbeatService.java          |     4 +-
 .../router/RouterPermissionChecker.java         |    59 +-
 .../federation/router/RouterRpcClient.java      |   108 +-
 .../federation/router/RouterRpcMonitor.java     |     6 +
 .../federation/router/RouterRpcServer.java      |   115 +-
 .../router/RouterSafeModeException.java         |    53 -
 .../router/SubClusterTimeoutException.java      |    33 +
 .../store/DisabledNameserviceStore.java         |    65 +
 .../federation/store/StateStoreService.java     |     5 +-
 .../driver/impl/StateStoreFileBaseImpl.java     |     2 +-
 .../store/driver/impl/StateStoreFileImpl.java   |    11 +-
 .../driver/impl/StateStoreFileSystemImpl.java   |     6 +-
 .../impl/DisabledNameserviceStoreImpl.java      |    68 +
 .../protocol/DisableNameserviceRequest.java     |    47 +
 .../protocol/DisableNameserviceResponse.java    |    50 +
 .../protocol/EnableNameserviceRequest.java      |    47 +
 .../protocol/EnableNameserviceResponse.java     |    50 +
 .../GetDisabledNameservicesRequest.java         |    30 +
 .../GetDisabledNameservicesResponse.java        |    51 +
 .../pb/DisableNameserviceRequestPBImpl.java     |    73 +
 .../pb/DisableNameserviceResponsePBImpl.java    |    74 +
 .../impl/pb/EnableNameserviceRequestPBImpl.java |    73 +
 .../pb/EnableNameserviceResponsePBImpl.java     |    73 +
 .../GetDisabledNameservicesRequestPBImpl.java   |    66 +
 .../GetDisabledNameservicesResponsePBImpl.java  |    84 +
 .../federation/store/records/BaseRecord.java    |    13 +
 .../store/records/DisabledNameservice.java      |    81 +
 .../federation/store/records/MountTable.java    |     2 +-
 .../impl/pb/DisabledNameservicePBImpl.java      |    95 +
 .../store/records/impl/pb/MountTablePBImpl.java |     2 +-
 .../hdfs/tools/federation/RouterAdmin.java      |   220 +-
 .../src/main/proto/FederationProtocol.proto     |    35 +-
 .../src/main/proto/RouterProtocol.proto         |    15 +
 .../src/main/resources/hdfs-rbf-default.xml     |     9 +
 .../main/webapps/router/federationhealth.html   |    18 +-
 .../src/main/webapps/router/federationhealth.js |     8 +-
 .../src/main/webapps/static/rbf.css             |     5 +
 .../src/site/markdown/HDFSRouterFederation.md   |    27 +-
 .../server/federation/FederationTestUtils.java  |    42 +
 .../server/federation/MiniRouterDFSCluster.java |    39 +-
 .../hdfs/server/federation/MockResolver.java    |    18 +-
 .../server/federation/StateStoreDFSCluster.java |    28 +
 .../router/TestDisableNameservices.java         |   236 +
 .../server/federation/router/TestRouter.java    |    70 +-
 .../federation/router/TestRouterAdmin.java      |   103 +-
 .../federation/router/TestRouterAdminCLI.java   |   320 +-
 .../router/TestRouterClientRejectOverload.java  |   243 +
 .../federation/router/TestRouterMountTable.java |    79 +-
 .../federation/router/TestRouterQuota.java      |     4 +
 .../router/TestRouterRPCClientRetries.java      |    93 +-
 .../server/federation/router/TestRouterRpc.java |    86 +-
 .../federation/router/TestRouterSafemode.java   |     3 +-
 .../server/federation/router/TestSafeMode.java  |    82 +
 .../TestStateStoreDisabledNameservice.java      |    71 +
 .../store/driver/TestStateStoreDriverBase.java  |    27 +-
 .../store/records/TestMountTable.java           |     4 +-
 .../src/test/resources/contract/webhdfs.xml     |     5 +
 .../jdiff/Apache_Hadoop_HDFS_3.0.2.xml          |   324 +
 .../jdiff/Apache_Hadoop_HDFS_3.1.0.xml          |   676 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |    22 +-
 .../hadoop/hdfs/net/DFSNetworkTopology.java     |    10 +-
 .../hdfs/protocol/datatransfer/Receiver.java    |     8 +-
 .../protocol/datatransfer/package-info.java     |     8 +-
 .../hdfs/qjournal/server/JournalNode.java       |    62 +-
 .../qjournal/server/JournalNodeHttpServer.java  |    65 +-
 .../qjournal/server/JournalNodeRpcServer.java   |    30 +-
 .../server/blockmanagement/BlockIdManager.java  |    17 +
 .../server/blockmanagement/BlockManager.java    |     5 +-
 .../blockmanagement/BlockManagerSafeMode.java   |     2 +-
 .../hdfs/server/blockmanagement/BlocksMap.java  |    12 +-
 .../blockmanagement/CorruptReplicasMap.java     |    35 +-
 .../blockmanagement/InvalidateBlocks.java       |    13 +-
 .../common/blockaliasmap/package-info.java      |     9 +-
 .../server/datanode/BlockChecksumHelper.java    |   289 +-
 .../hdfs/server/datanode/DataXceiver.java       |    26 +-
 .../hdfs/server/datanode/LocalReplica.java      |    18 +-
 ...dBlockChecksumCompositeCrcReconstructor.java |    80 +
 ...StripedBlockChecksumMd5CrcReconstructor.java |    74 +
 .../StripedBlockChecksumReconstructor.java      |    66 +-
 .../erasurecode/StripedBlockReconstructor.java  |     1 +
 .../impl/FsDatasetAsyncDiskService.java         |     3 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |     9 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |    38 +-
 .../fsdataset/impl/FsVolumeImplBuilder.java     |    16 +-
 .../datanode/fsdataset/impl/FsVolumeList.java   |    11 +-
 .../fsdataset/impl/ProvidedVolumeImpl.java      |     2 +-
 .../fsdataset/impl/ReservedSpaceCalculator.java |   227 +
 .../datanode/metrics/DataNodeMetrics.java       |    12 +-
 .../diskbalancer/connectors/package-info.java   |     3 +-
 .../diskbalancer/datamodel/package-info.java    |     4 +-
 .../hdfs/server/diskbalancer/package-info.java  |     2 +-
 .../diskbalancer/planner/package-info.java      |     2 +-
 .../server/namenode/EncryptionZoneManager.java  |     8 +-
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |    12 +
 .../hdfs/server/namenode/FSEditLogAsync.java    |     8 +-
 .../hdfs/server/namenode/FSEditLogOp.java       |     4 +-
 .../hdfs/server/namenode/FSTreeTraverser.java   |   339 +
 .../hadoop/hdfs/server/namenode/NameNode.java   |    45 +-
 .../hdfs/server/namenode/NameNodeUtils.java     |   125 +
 .../server/namenode/ReencryptionHandler.java    |   615 +-
 .../server/namenode/ReencryptionUpdater.java    |     2 +-
 .../org/apache/hadoop/hdfs/tools/DFSck.java     |     2 +-
 .../OfflineImageReconstructor.java              |     4 +-
 .../src/main/proto/NamenodeProtocol.proto       |     5 +-
 .../src/main/resources/hdfs-default.xml         |    86 +
 .../src/main/webapps/datanode/datanode.html     |    46 +
 .../src/site/markdown/ArchivalStorage.md        |     2 +-
 .../src/site/markdown/HDFSCommands.md           |    10 +-
 .../src/site/markdown/MemoryStorage.md          |     2 +-
 .../src/site/markdown/TransparentEncryption.md  |    57 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |    33 +-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |    25 +-
 .../apache/hadoop/hdfs/TestDFSOutputStream.java |     2 +-
 .../hadoop/hdfs/TestDistributedFileSystem.java  |     2 +-
 .../apache/hadoop/hdfs/TestEncryptionZones.java |     3 +
 .../hadoop/hdfs/TestEncryptionZonesWithKMS.java |     2 +-
 .../org/apache/hadoop/hdfs/TestFileAppend.java  |    71 +-
 .../apache/hadoop/hdfs/TestFileChecksum.java    |   101 +-
 .../hdfs/TestFileChecksumCompositeCrc.java      |    47 +
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java   |     2 +-
 .../apache/hadoop/hdfs/TestMiniDFSCluster.java  |    16 +-
 .../hdfs/client/impl/BlockReaderTestUtil.java   |     5 +
 .../hdfs/client/impl/TestBlockReaderLocal.java  |    60 +
 .../hadoop/hdfs/protocolPB/TestPBHelper.java    |    14 +
 .../TestJournalNodeRespectsBindHostKeys.java    |   200 +
 .../TestDelegationTokenForProxyUser.java        |     2 +-
 .../hdfs/server/balancer/TestKeyManager.java    |     2 +-
 .../blockmanagement/BlockManagerTestUtil.java   |     2 +-
 .../blockmanagement/TestBlockInfoStriped.java   |     2 +-
 .../blockmanagement/TestBlockManager.java       |    61 +-
 .../TestBlockManagerSafeMode.java               |     3 +-
 .../TestComputeInvalidateWork.java              |     2 +-
 .../blockmanagement/TestCorruptReplicaInfo.java |    48 +-
 .../blockmanagement/TestDatanodeManager.java    |    54 +-
 .../blockmanagement/TestHostFileManager.java    |     3 +-
 .../TestPendingInvalidateBlock.java             |     3 +-
 ...constructStripedBlocksWithRackAwareness.java |     2 +-
 .../TestSequentialBlockGroupId.java             |     2 +-
 .../TestUnderReplicatedBlocks.java              |     2 +-
 .../server/datanode/TestBlockPoolManager.java   |     2 +-
 .../hdfs/server/datanode/TestDNUsageReport.java |   117 +
 .../fsdataset/impl/TestFsDatasetImpl.java       |   107 +-
 .../fsdataset/impl/TestFsVolumeList.java        |    90 +-
 .../impl/TestReservedSpaceCalculator.java       |   171 +
 .../fsdataset/impl/TestWriteToReplica.java      |    19 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |     2 +-
 .../namenode/TestAddStripedBlockInFBR.java      |     2 +-
 .../namenode/TestClientNameNodeAddress.java     |   104 +
 .../TestCommitBlockSynchronization.java         |     2 +-
 .../hdfs/server/namenode/TestDeleteRace.java    |     2 +-
 .../hdfs/server/namenode/TestFSNamesystem.java  |     2 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   123 +-
 .../hdfs/server/namenode/TestReencryption.java  |     5 +-
 .../namenode/TestReencryptionHandler.java       |    12 +-
 .../hdfs/server/namenode/TestSaveNamespace.java |     2 +-
 .../namenode/TestTruncateQuotaUpdate.java       |     2 +-
 .../ha/TestBootstrapStandbyWithQJM.java         |     2 +-
 .../namenode/ha/TestDFSUpgradeWithHA.java       |     2 +-
 .../namenode/ha/TestDelegationTokensWithHA.java |     2 +-
 .../hdfs/server/namenode/ha/TestHASafeMode.java |     2 +-
 .../snapshot/TestFileWithSnapshotFeature.java   |     2 +-
 .../snapshot/TestRenameWithSnapshots.java       |     2 +-
 .../shortcircuit/TestShortCircuitCache.java     |    26 +
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  |    80 +-
 .../TestOfflineImageViewer.java                 |     7 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java |     2 +-
 .../hadoop/hdfs/web/TestWebHDFSForHA.java       |     2 +-
 .../hadoop/hdfs/web/TestWebHdfsTokens.java      |     2 +-
 .../security/TestRefreshUserMappings.java       |    10 +-
 .../src/test/resources/log4j.properties         |    24 +
 .../src/test/resources/testCryptoConf.xml       |    19 +
 .../Apache_Hadoop_MapReduce_Common_3.1.0.xml    |   113 +
 .../Apache_Hadoop_MapReduce_Core_3.1.0.xml      | 28075 +++++++++++++++++
 .../Apache_Hadoop_MapReduce_JobClient_3.1.0.xml |    16 +
 .../apache/hadoop/mapred/MapReduceChildJVM.java |    73 +-
 .../jobhistory/JobHistoryEventHandler.java      |    68 +-
 .../v2/app/job/impl/TaskAttemptImpl.java        |     8 +-
 .../mapreduce/v2/app/job/impl/TaskImpl.java     |     0
 .../jobhistory/TestJobHistoryEventHandler.java  |    29 +
 .../v2/app/job/impl/TestMapReduceChildJVM.java  |    24 +-
 .../mapreduce/v2/app/job/impl/TestTaskImpl.java |     0
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |    10 +
 .../apache/hadoop/mapred/FileInputFormat.java   |    25 +-
 .../java/org/apache/hadoop/mapred/JobConf.java  |    18 +
 .../apache/hadoop/mapred/pipes/Application.java |     5 +-
 .../HumanReadableHistoryViewerPrinter.java      |     3 +-
 .../jobhistory/JSONHistoryViewerPrinter.java    |     3 +-
 .../mapreduce/lib/input/FileInputFormat.java    |     8 +
 .../hadoop/mapreduce/security/TokenCache.java   |    14 +-
 .../src/main/resources/mapred-default.xml       |    67 +-
 .../src/site/markdown/MapReduceTutorial.md      |     6 +
 .../PluggableShuffleAndPluggableSort.md         |    44 +
 .../hadoop/mapred/TestFileInputFormat.java      |    17 +-
 .../jobhistory/TestHistoryViewerPrinter.java    |    76 +
 .../lib/input/TestFileInputFormat.java          |    12 +
 .../mapreduce/security/TestTokenCache.java      |    18 +-
 .../mapreduce/task/reduce/TestMergeManager.java |     2 +-
 .../HistoryServerLeveldbStateStoreService.java  |    11 -
 .../org/apache/hadoop/mapred/NotRunningJob.java |     2 +-
 .../org/apache/hadoop/mapred/YARNRunner.java    |    11 +-
 .../mapred/TestClientServiceDelegate.java       |     4 +-
 .../apache/hadoop/mapred/TestYARNRunner.java    |    29 +-
 .../mapred/pipes/TestPipeApplication.java       |    19 +-
 .../mapreduce/MiniHadoopClusterManager.java     |     1 +
 .../apache/hadoop/mapred/ShuffleHandler.java    |    12 -
 hadoop-maven-plugins/pom.xml                    |     2 +-
 hadoop-minicluster/pom.xml                      |     6 +
 hadoop-ozone/acceptance-test/README.md          |    38 +
 hadoop-ozone/acceptance-test/pom.xml            |    86 +
 .../acceptance-test/src/test/compose/.env       |    17 +
 .../src/test/compose/docker-compose.yaml        |    61 +
 .../src/test/compose/docker-config              |    35 +
 .../test/robotframework/acceptance/ozone.robot  |   116 +
 hadoop-ozone/client/pom.xml                     |    43 +
 .../apache/hadoop/ozone/client/BucketArgs.java  |   123 +
 .../apache/hadoop/ozone/client/ObjectStore.java |   211 +
 .../apache/hadoop/ozone/client/OzoneBucket.java |   363 +
 .../apache/hadoop/ozone/client/OzoneClient.java |   103 +
 .../hadoop/ozone/client/OzoneClientFactory.java |   307 +
 .../client/OzoneClientInvocationHandler.java    |    62 +
 .../apache/hadoop/ozone/client/OzoneKey.java    |   119 +
 .../apache/hadoop/ozone/client/OzoneVolume.java |   295 +
 .../apache/hadoop/ozone/client/VolumeArgs.java  |   128 +
 .../ozone/client/io/ChunkGroupInputStream.java  |   319 +
 .../ozone/client/io/ChunkGroupOutputStream.java |   495 +
 .../client/io/OzoneContainerTranslation.java    |    51 +
 .../ozone/client/io/OzoneInputStream.java       |    60 +
 .../ozone/client/io/OzoneOutputStream.java      |    64 +
 .../hadoop/ozone/client/io/package-info.java    |    23 +
 .../hadoop/ozone/client/package-info.java       |    23 +
 .../ozone/client/protocol/ClientProtocol.java   |   323 +
 .../ozone/client/protocol/package-info.java     |    23 +
 .../client/rest/DefaultRestServerSelector.java  |    36 +
 .../ozone/client/rest/OzoneExceptionMapper.java |    45 +
 .../hadoop/ozone/client/rest/RestClient.java    |   799 +
 .../ozone/client/rest/RestServerSelector.java   |    40 +
 .../client/rest/exceptions/package-info.java    |    22 +
 .../ozone/client/rest/headers/package-info.java |    22 +
 .../hadoop/ozone/client/rest/package-info.java  |    23 +
 .../hadoop/ozone/client/rpc/RpcClient.java      |   567 +
 .../hadoop/ozone/client/rpc/package-info.java   |    23 +
 .../hadoop/ozone/web/client/OzoneBucket.java    |   645 +
 .../hadoop/ozone/web/client/OzoneKey.java       |    44 +
 .../ozone/web/client/OzoneRestClient.java       |   803 +
 .../web/client/OzoneRestClientException.java    |    54 +
 .../hadoop/ozone/web/client/OzoneVolume.java    |   583 +
 .../hadoop/ozone/web/client/package-info.java   |    34 +
 .../ozone/client/TestHddsClientUtils.java       |   105 +
 .../hadoop/ozone/client/package-info.java       |    23 +
 .../common/dev-support/findbugsExcludeFile.xml  |    22 +
 hadoop-ozone/common/pom.xml                     |    88 +
 hadoop-ozone/common/src/main/bin/ozone          |   188 +
 hadoop-ozone/common/src/main/bin/start-ozone.sh |   105 +
 hadoop-ozone/common/src/main/bin/stop-ozone.sh  |    99 +
 .../common/src/main/conf/ozone-site.xml         |    24 +
 .../java/org/apache/hadoop/ozone/KsmUtils.java  |    87 +
 .../ozone/client/io/LengthInputStream.java      |    49 +
 .../hadoop/ozone/client/io/package-info.java    |    22 +
 .../ozone/client/rest/OzoneException.java       |   267 +
 .../ozone/client/rest/headers/Header.java       |    71 +
 .../ozone/client/rest/headers/package-info.java |    22 +
 .../hadoop/ozone/client/rest/package-info.java  |    22 +
 .../ozone/client/rest/response/BucketInfo.java  |   231 +
 .../ozone/client/rest/response/KeyInfo.java     |   216 +
 .../ozone/client/rest/response/VolumeInfo.java  |   215 +
 .../ozone/client/rest/response/VolumeOwner.java |    61 +
 .../client/rest/response/package-info.java      |    24 +
 .../apache/hadoop/ozone/freon/OzoneGetConf.java |   269 +
 .../apache/hadoop/ozone/freon/package-info.java |    21 +
 .../apache/hadoop/ozone/ksm/KSMConfigKeys.java  |    81 +
 .../hadoop/ozone/ksm/helpers/KsmBucketArgs.java |   233 +
 .../hadoop/ozone/ksm/helpers/KsmBucketInfo.java |   235 +
 .../hadoop/ozone/ksm/helpers/KsmKeyArgs.java    |   119 +
 .../hadoop/ozone/ksm/helpers/KsmKeyInfo.java    |   243 +
 .../ozone/ksm/helpers/KsmKeyLocationInfo.java   |   136 +
 .../ksm/helpers/KsmKeyLocationInfoGroup.java    |   118 +
 .../ozone/ksm/helpers/KsmOzoneAclMap.java       |   110 +
 .../hadoop/ozone/ksm/helpers/KsmVolumeArgs.java |   223 +
 .../ozone/ksm/helpers/OpenKeySession.java       |    50 +
 .../hadoop/ozone/ksm/helpers/ServiceInfo.java   |   237 +
 .../hadoop/ozone/ksm/helpers/VolumeArgs.java    |   140 +
 .../hadoop/ozone/ksm/helpers/package-info.java  |    18 +
 .../apache/hadoop/ozone/ksm/package-info.java   |    21 +
 .../ksm/protocol/KeySpaceManagerProtocol.java   |   245 +
 .../hadoop/ozone/ksm/protocol/package-info.java |    19 +
 ...ceManagerProtocolClientSideTranslatorPB.java |   744 +
 .../protocolPB/KeySpaceManagerProtocolPB.java   |    34 +
 .../ozone/ksm/protocolPB/package-info.java      |    19 +
 .../org/apache/hadoop/ozone/package-info.java   |    22 +
 .../hadoop/ozone/protocolPB/KSMPBHelper.java    |   113 +
 .../hadoop/ozone/protocolPB/package-info.java   |    24 +
 .../hadoop/ozone/web/handlers/BucketArgs.java   |   214 +
 .../hadoop/ozone/web/handlers/KeyArgs.java      |   117 +
 .../hadoop/ozone/web/handlers/ListArgs.java     |   142 +
 .../hadoop/ozone/web/handlers/UserArgs.java     |   168 +
 .../hadoop/ozone/web/handlers/VolumeArgs.java   |   142 +
 .../hadoop/ozone/web/handlers/package-info.java |    22 +
 .../hadoop/ozone/web/request/OzoneQuota.java    |   214 +
 .../hadoop/ozone/web/request/package-info.java  |    23 +
 .../hadoop/ozone/web/response/BucketInfo.java   |   325 +
 .../hadoop/ozone/web/response/KeyInfo.java      |   290 +
 .../hadoop/ozone/web/response/ListBuckets.java  |   154 +
 .../hadoop/ozone/web/response/ListKeys.java     |   209 +
 .../hadoop/ozone/web/response/ListVolumes.java  |   152 +
 .../hadoop/ozone/web/response/VolumeInfo.java   |   308 +
 .../hadoop/ozone/web/response/VolumeOwner.java  |    60 +
 .../hadoop/ozone/web/response/package-info.java |    23 +
 .../hadoop/ozone/web/utils/OzoneUtils.java      |   227 +
 .../hadoop/ozone/web/utils/package-info.java    |    18 +
 .../main/proto/KeySpaceManagerProtocol.proto    |   457 +
 .../src/main/shellprofile.d/hadoop-ozone.sh     |    44 +
 .../apache/hadoop/ozone/web/TestBucketInfo.java |    81 +
 .../org/apache/hadoop/ozone/web/TestQuota.java  |   116 +
 .../org/apache/hadoop/ozone/web/TestUtils.java  |   101 +
 .../hadoop/ozone/web/TestVolumeStructs.java     |    73 +
 .../apache/hadoop/ozone/web/package-info.java   |    21 +
 hadoop-ozone/integration-test/pom.xml           |    86 +
 .../container/TestContainerStateManager.java    |   309 +
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |   322 +
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |   425 +
 .../apache/hadoop/ozone/RatisTestHelper.java    |   110 +
 .../hadoop/ozone/TestContainerOperations.java   |    87 +
 .../hadoop/ozone/TestMiniOzoneCluster.java      |   217 +
 .../ozone/TestOzoneConfigurationFields.java     |    38 +
 .../apache/hadoop/ozone/TestOzoneHelper.java    |   413 +
 .../ozone/TestStorageContainerManager.java      |   443 +
 .../TestStorageContainerManagerHelper.java      |   179 +
 .../ozone/client/rest/TestOzoneRestClient.java  |   404 +
 .../hadoop/ozone/client/rest/package-info.java  |    23 +
 .../ozone/client/rpc/TestOzoneRpcClient.java    |   815 +
 .../hadoop/ozone/client/rpc/package-info.java   |    23 +
 .../ozone/container/ContainerTestHelper.java    |   601 +
 .../common/TestBlockDeletingService.java        |   398 +
 .../TestContainerDeletionChoosingPolicy.java    |   197 +
 .../common/impl/TestContainerPersistence.java   |   905 +
 .../TestCloseContainerHandler.java              |   114 +
 .../container/metrics/TestContainerMetrics.java |   127 +
 .../container/ozoneimpl/TestOzoneContainer.java |   587 +
 .../ozoneimpl/TestOzoneContainerRatis.java      |   142 +
 .../container/ozoneimpl/TestRatisManager.java   |   128 +
 .../container/server/TestContainerServer.java   |   262 +
 .../hadoop/ozone/freon/TestDataValidate.java    |   145 +
 .../apache/hadoop/ozone/freon/TestFreon.java    |   130 +
 .../apache/hadoop/ozone/freon/package-info.java |    21 +
 .../ozone/ksm/TestContainerReportWithKeys.java  |   143 +
 .../apache/hadoop/ozone/ksm/TestKSMMetrcis.java |   306 +
 .../apache/hadoop/ozone/ksm/TestKSMSQLCli.java  |   284 +
 .../hadoop/ozone/ksm/TestKeySpaceManager.java   |  1236 +
 .../ksm/TestKeySpaceManagerRestInterface.java   |   134 +
 .../ozone/ksm/TestKsmBlockVersioning.java       |   253 +
 .../ksm/TestMultipleContainerReadWrite.java     |   215 +
 .../hadoop/ozone/ozShell/TestOzoneShell.java    |   800 +
 .../hadoop/ozone/scm/TestAllocateContainer.java |    99 +
 .../hadoop/ozone/scm/TestContainerSQLCli.java   |   300 +
 .../ozone/scm/TestContainerSmallFile.java       |   147 +
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java |   543 +
 .../apache/hadoop/ozone/scm/TestSCMMXBean.java  |   161 +
 .../apache/hadoop/ozone/scm/TestSCMMetrics.java |   258 +
 .../ozone/scm/TestXceiverClientManager.java     |   198 +
 .../ozone/scm/TestXceiverClientMetrics.java     |   168 +
 .../hadoop/ozone/scm/node/TestQueryNode.java    |   120 +
 .../ozone/web/TestDistributedOzoneVolumes.java  |   186 +
 .../hadoop/ozone/web/TestLocalOzoneVolumes.java |   185 +
 .../ozone/web/TestOzoneRestWithMiniCluster.java |   263 +
 .../hadoop/ozone/web/TestOzoneWebAccess.java    |   125 +
 .../hadoop/ozone/web/client/TestBuckets.java    |   248 +
 .../ozone/web/client/TestBucketsRatis.java      |    76 +
 .../hadoop/ozone/web/client/TestKeys.java       |   688 +
 .../hadoop/ozone/web/client/TestKeysRatis.java  |   127 +
 .../ozone/web/client/TestOzoneClient.java       |   305 +
 .../hadoop/ozone/web/client/TestVolume.java     |   433 +
 .../ozone/web/client/TestVolumeRatis.java       |   129 +
 .../src/test/resources/log4j.properties         |    18 +
 .../src/test/resources/webapps/ksm/.gitkeep     |    15 +
 .../src/test/resources/webapps/scm/.gitkeep     |    15 +
 hadoop-ozone/objectstore-service/pom.xml        |    69 +
 .../server/datanode/ObjectStoreHandler.java     |   191 +
 .../hdfs/server/datanode/package-info.java      |    22 +
 .../org/apache/hadoop/ozone/OzoneRestUtils.java |   222 +
 .../org/apache/hadoop/ozone/package-info.java   |    22 +
 .../ozone/web/ObjectStoreApplication.java       |    59 +
 .../ozone/web/OzoneHddsDatanodeService.java     |    84 +
 .../hadoop/ozone/web/exceptions/ErrorTable.java |   225 +
 .../ozone/web/exceptions/package-info.java      |    22 +
 .../ozone/web/handlers/BucketHandler.java       |   197 +
 .../web/handlers/BucketProcessTemplate.java     |   323 +
 .../hadoop/ozone/web/handlers/KeyHandler.java   |   245 +
 .../ozone/web/handlers/KeyProcessTemplate.java  |   235 +
 .../ozone/web/handlers/ServiceFilter.java       |    61 +
 .../web/handlers/StorageHandlerBuilder.java     |    76 +
 .../ozone/web/handlers/UserHandlerBuilder.java  |    75 +
 .../ozone/web/handlers/VolumeHandler.java       |   272 +
 .../web/handlers/VolumeProcessTemplate.java     |   276 +
 .../hadoop/ozone/web/handlers/package-info.java |    22 +
 .../hadoop/ozone/web/interfaces/Accounting.java |    57 +
 .../hadoop/ozone/web/interfaces/Bucket.java     |   183 +
 .../hadoop/ozone/web/interfaces/Keys.java       |   146 +
 .../ozone/web/interfaces/StorageHandler.java    |   295 +
 .../hadoop/ozone/web/interfaces/UserAuth.java   |   101 +
 .../hadoop/ozone/web/interfaces/Volume.java     |   182 +
 .../ozone/web/interfaces/package-info.java      |    22 +
 .../web/localstorage/LocalStorageHandler.java   |   379 +
 .../web/localstorage/OzoneMetadataManager.java  |  1138 +
 .../ozone/web/localstorage/package-info.java    |    18 +
 .../LengthInputStreamMessageBodyWriter.java     |    59 +
 .../web/messages/StringMessageBodyWriter.java   |    62 +
 .../hadoop/ozone/web/messages/package-info.java |    18 +
 .../web/netty/CloseableCleanupListener.java     |    46 +
 .../web/netty/ObjectStoreChannelHandler.java    |    78 +
 .../web/netty/ObjectStoreJerseyContainer.java   |   348 +
 .../ObjectStoreJerseyContainerProvider.java     |    40 +
 .../web/netty/ObjectStoreRestHttpServer.java    |   215 +
 .../web/netty/ObjectStoreURLDispatcher.java     |    61 +
 ...RequestContentObjectStoreChannelHandler.java |   117 +
 ...equestDispatchObjectStoreChannelHandler.java |   103 +
 .../hadoop/ozone/web/netty/package-info.java    |    26 +
 .../apache/hadoop/ozone/web/package-info.java   |    22 +
 .../web/storage/DistributedStorageHandler.java  |   539 +
 .../hadoop/ozone/web/storage/package-info.java  |    27 +
 .../hadoop/ozone/web/userauth/Simple.java       |   169 +
 .../hadoop/ozone/web/userauth/package-info.java |    23 +
 ...m.sun.jersey.spi.container.ContainerProvider |    16 +
 .../org/apache/hadoop/ozone/TestErrorCode.java  |    53 +
 hadoop-ozone/ozone-manager/pom.xml              |   114 +
 .../apache/hadoop/ozone/ksm/BucketManager.java  |    79 +
 .../hadoop/ozone/ksm/BucketManagerImpl.java     |   315 +
 .../org/apache/hadoop/ozone/ksm/KSMMXBean.java  |    31 +
 .../hadoop/ozone/ksm/KSMMetadataManager.java    |   253 +
 .../ozone/ksm/KSMMetadataManagerImpl.java       |   525 +
 .../org/apache/hadoop/ozone/ksm/KSMMetrics.java |   437 +
 .../org/apache/hadoop/ozone/ksm/KSMStorage.java |    90 +
 .../hadoop/ozone/ksm/KeyDeletingService.java    |   141 +
 .../org/apache/hadoop/ozone/ksm/KeyManager.java |   165 +
 .../apache/hadoop/ozone/ksm/KeyManagerImpl.java |   512 +
 .../hadoop/ozone/ksm/KeySpaceManager.java       |   901 +
 .../ozone/ksm/KeySpaceManagerHttpServer.java    |    78 +
 .../hadoop/ozone/ksm/OpenKeyCleanupService.java |   116 +
 .../ozone/ksm/ServiceListJSONServlet.java       |   103 +
 .../apache/hadoop/ozone/ksm/VolumeManager.java  |   100 +
 .../hadoop/ozone/ksm/VolumeManagerImpl.java     |   391 +
 .../ozone/ksm/exceptions/KSMException.java      |   116 +
 .../ozone/ksm/exceptions/package-info.java      |    19 +
 .../apache/hadoop/ozone/ksm/package-info.java   |    21 +
 ...ceManagerProtocolServerSideTranslatorPB.java |   539 +
 .../hadoop/ozone/protocolPB/package-info.java   |    22 +
 .../hadoop/ozone/web/ozShell/Handler.java       |    79 +
 .../apache/hadoop/ozone/web/ozShell/Shell.java  |   415 +
 .../web/ozShell/bucket/CreateBucketHandler.java |    96 +
 .../web/ozShell/bucket/DeleteBucketHandler.java |    87 +
 .../web/ozShell/bucket/InfoBucketHandler.java   |    94 +
 .../web/ozShell/bucket/ListBucketHandler.java   |   113 +
 .../web/ozShell/bucket/UpdateBucketHandler.java |    94 +
 .../ozone/web/ozShell/bucket/package-info.java  |    23 +
 .../web/ozShell/keys/DeleteKeyHandler.java      |    96 +
 .../ozone/web/ozShell/keys/GetKeyHandler.java   |   116 +
 .../ozone/web/ozShell/keys/InfoKeyHandler.java  |    98 +
 .../ozone/web/ozShell/keys/ListKeyHandler.java  |   116 +
 .../ozone/web/ozShell/keys/PutKeyHandler.java   |   106 +
 .../ozone/web/ozShell/keys/package-info.java    |    23 +
 .../hadoop/ozone/web/ozShell/package-info.java  |    27 +
 .../web/ozShell/volume/CreateVolumeHandler.java |    99 +
 .../web/ozShell/volume/DeleteVolumeHandler.java |    81 +
 .../web/ozShell/volume/InfoVolumeHandler.java   |    95 +
 .../web/ozShell/volume/ListVolumeHandler.java   |   114 +
 .../web/ozShell/volume/UpdateVolumeHandler.java |    92 +
 .../ozone/web/ozShell/volume/package-info.java  |    23 +
 .../apache/hadoop/ozone/web/package-info.java   |    24 +
 .../src/main/site/markdown/OzoneCommandShell.md |   150 +
 .../site/markdown/OzoneGettingStarted.md.vm     |   347 +
 .../src/main/site/markdown/OzoneMetrics.md      |   166 +
 .../src/main/site/markdown/OzoneOverview.md     |    88 +
 .../src/main/site/markdown/OzoneRest.md         |   549 +
 .../src/main/webapps/ksm/index.html             |    70 +
 .../src/main/webapps/ksm/ksm-metrics.html       |    44 +
 .../ozone-manager/src/main/webapps/ksm/ksm.js   |   108 +
 .../ozone-manager/src/main/webapps/ksm/main.css |    23 +
 .../src/main/webapps/ksm/main.html              |    18 +
 .../hadoop/ozone/ksm/TestBucketManagerImpl.java |   395 +
 .../hadoop/ozone/ksm/TestChunkStreams.java      |   234 +
 .../ksm/TestKeySpaceManagerHttpServer.java      |   141 +
 .../apache/hadoop/ozone/ksm/package-info.java   |    21 +
 hadoop-ozone/pom.xml                            |   169 +
 .../tools/dev-support/findbugsExcludeFile.xml   |    19 +
 hadoop-ozone/tools/pom.xml                      |    85 +
 .../org/apache/hadoop/ozone/freon/Freon.java    |  1149 +
 .../apache/hadoop/ozone/freon/package-info.java |    22 +
 .../genesis/BenchMarkContainerStateMap.java     |   171 +
 .../genesis/BenchMarkDatanodeDispatcher.java    |   280 +
 .../genesis/BenchMarkMetadataStoreReads.java    |    67 +
 .../genesis/BenchMarkMetadataStoreWrites.java   |    60 +
 .../ozone/genesis/BenchMarkRocksDbStore.java    |   115 +
 .../apache/hadoop/ozone/genesis/Genesis.java    |    59 +
 .../ozone/genesis/GenesisMemoryProfiler.java    |    59 +
 .../hadoop/ozone/genesis/GenesisUtil.java       |    90 +
 .../hadoop/ozone/genesis/package-info.java      |    25 +
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |   709 +
 .../hadoop/ozone/scm/cli/package-info.java      |    22 +
 .../org/apache/hadoop/test/OzoneTestDriver.java |    59 +
 hadoop-project-dist/pom.xml                     |     6 +-
 hadoop-project/pom.xml                          |   306 +-
 hadoop-project/src/site/site.xml                |    14 +
 .../src/site/resources/css/site.css             |    30 +
 .../hadoop/fs/s3a/S3ABlockOutputStream.java     |     7 +-
 .../apache/hadoop/fs/s3a/S3ARetryPolicy.java    |     6 +-
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   |    20 +-
 .../fs/s3a/s3guard/LocalMetadataStore.java      |    20 +-
 .../hadoop/fs/s3a/s3guard/MetadataStore.java    |    14 +-
 .../s3a/s3guard/MetadataStoreCapabilities.java  |    43 +
 .../fs/s3a/s3guard/NullMetadataStore.java       |     4 +
 .../hadoop/fs/s3a/s3guard/S3GuardTool.java      |    10 +-
 .../site/markdown/tools/hadoop-aws/s3guard.md   |    11 +-
 .../hadoop-aws/src/site/resources/css/site.css  |    30 +
 .../org/apache/hadoop/fs/s3a/TestInvoker.java   |    14 +-
 .../hadoop/fs/s3a/TestS3ABlockOutputStream.java |    66 +
 .../s3guard/AbstractS3GuardToolTestBase.java    |    21 +-
 .../s3a/s3guard/ITestS3GuardToolDynamoDB.java   |     9 +
 .../fs/s3a/s3guard/MetadataStoreTestBase.java   |    58 +-
 hadoop-tools/hadoop-azure-datalake/pom.xml      |     2 +-
 .../src/site/resources/css/site.css             |    30 +
 .../src/site/resources/css/site.css             |    30 +
 .../hadoop/tools/mapred/TestCopyMapper.java     |   173 +-
 .../mapred/TestCopyMapperCompositeCrc.java      |    50 +
 hadoop-tools/hadoop-ozone/pom.xml               |   199 +
 .../org/apache/hadoop/fs/ozone/Constants.java   |    48 +
 .../java/org/apache/hadoop/fs/ozone/OzFs.java   |    43 +
 .../hadoop/fs/ozone/OzoneFSInputStream.java     |    79 +
 .../hadoop/fs/ozone/OzoneFSOutputStream.java    |    59 +
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java |   700 +
 .../apache/hadoop/fs/ozone/package-info.java    |    30 +
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java |   155 +
 .../fs/ozone/TestOzoneFileInterfaces.java       |   230 +
 .../contract/ITestOzoneContractCreate.java      |    48 +
 .../contract/ITestOzoneContractDelete.java      |    48 +
 .../contract/ITestOzoneContractDistCp.java      |    50 +
 .../ITestOzoneContractGetFileStatus.java        |    61 +
 .../ozone/contract/ITestOzoneContractMkdir.java |    48 +
 .../ozone/contract/ITestOzoneContractOpen.java  |    47 +
 .../contract/ITestOzoneContractRename.java      |    49 +
 .../contract/ITestOzoneContractRootDir.java     |    51 +
 .../ozone/contract/ITestOzoneContractSeek.java  |    47 +
 .../hadoop/fs/ozone/contract/OzoneContract.java |   123 +
 .../src/test/resources/contract/ozone.xml       |   113 +
 .../src/test/resources/log4j.properties         |    23 +
 hadoop-tools/hadoop-sls/pom.xml                 |     1 +
 .../src/main/html/simulate.html.template        |    24 +-
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |    80 +-
 .../sls/scheduler/SLSCapacityScheduler.java     |    29 +
 .../yarn/sls/scheduler/SchedulerMetrics.java    |    42 +
 .../apache/hadoop/yarn/sls/utils/SLSUtils.java  |    24 +-
 .../apache/hadoop/yarn/sls/web/SLSWebApp.java   |    70 +-
 .../hadoop/yarn/sls/utils/TestSLSUtils.java     |    25 +
 .../test/resources/nodes-with-resources.json    |    19 +
 hadoop-tools/hadoop-tools-dist/pom.xml          |    15 +
 hadoop-tools/pom.xml                            |    12 +-
 .../MySQL/FederationStateStoreTables.sql        |     2 +-
 .../jdiff/Apache_Hadoop_YARN_Client_3.1.0.xml   |  3146 ++
 .../jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml   |  3034 ++
 .../Apache_Hadoop_YARN_Server_Common_3.1.0.xml  |  1331 +
 .../hadoop/yarn/api/ApplicationConstants.java   |    10 +
 .../yarn/api/records/ApplicationReport.java     |    45 +-
 .../timelineservice/SubApplicationEntity.java   |    50 +
 .../yarn/api/resource/PlacementConstraints.java |    24 +-
 .../hadoop/yarn/conf/YarnConfiguration.java     |    48 +-
 .../src/main/proto/yarn_protos.proto            |     1 +
 .../api/resource/TestPlacementConstraints.java  |     4 +-
 .../distributedshell/ApplicationMaster.java     |    68 +-
 .../distributedshell/TestDSAppMaster.java       |     8 +-
 .../hadoop-yarn-services-api/pom.xml            |     5 +
 .../yarn/service/client/ApiServiceClient.java   |    93 +-
 .../client/SystemServiceManagerImpl.java        |   391 +
 .../hadoop/yarn/service/webapp/ApiServer.java   |   252 +-
 .../definition/YARN-Services-Examples.md        |   240 +-
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |    31 +-
 .../hadoop/yarn/service/ServiceClientTest.java  |   121 +-
 .../hadoop/yarn/service/TestApiServer.java      |   141 +-
 .../service/client/TestApiServiceClient.java    |    45 +-
 .../client/TestSystemServiceManagerImpl.java    |   182 +
 .../resources/system-services/bad/bad.yarnfile  |    16 +
 .../sync/user1/example-app1.yarnfile            |    16 +
 .../sync/user1/example-app2.yarnfile            |    16 +
 .../sync/user1/example-app3.json                |    16 +
 .../sync/user2/example-app1.yarnfile            |    16 +
 .../sync/user2/example-app2.yarnfile            |    16 +
 .../hadoop-yarn-services-core/pom.xml           |     5 +
 .../hadoop/yarn/service/ClientAMProtocol.java   |     6 +
 .../hadoop/yarn/service/ClientAMService.java    |    39 +-
 .../yarn/service/ContainerFailureTracker.java   |     7 +-
 .../hadoop/yarn/service/ServiceContext.java     |    10 +
 .../hadoop/yarn/service/ServiceEvent.java       |    10 +
 .../hadoop/yarn/service/ServiceEventType.java   |     2 +-
 .../hadoop/yarn/service/ServiceManager.java     |   156 +-
 .../hadoop/yarn/service/ServiceMaster.java      |    18 +-
 .../hadoop/yarn/service/ServiceScheduler.java   |    73 +-
 .../yarn/service/api/records/BaseResource.java  |     2 +-
 .../yarn/service/api/records/Component.java     |    21 +
 .../yarn/service/api/records/ConfigFile.java    |     3 +-
 .../service/api/records/ContainerState.java     |     2 +-
 .../service/api/records/ReadinessCheck.java     |     1 +
 .../yarn/service/api/records/Service.java       |    24 +
 .../yarn/service/api/records/ServiceState.java  |     3 +-
 .../yarn/service/client/ClientAMProxy.java      |     5 +-
 .../yarn/service/client/ServiceClient.java      |   347 +-
 .../yarn/service/component/Component.java       |   251 +-
 .../yarn/service/component/ComponentEvent.java  |    10 +
 .../service/component/ComponentEventType.java   |     2 +-
 .../component/instance/ComponentInstance.java   |    77 +-
 .../instance/ComponentInstanceEventType.java    |     3 +-
 .../yarn/service/conf/RestApiConstants.java     |    12 +
 .../yarn/service/conf/YarnServiceConf.java      |    87 +
 .../yarn/service/conf/YarnServiceConstants.java |     1 +
 .../containerlaunch/ContainerLaunchService.java |   100 +-
 .../yarn/service/exceptions/ErrorStrings.java   |     2 +
 .../pb/client/ClientAMProtocolPBClientImpl.java |    14 +
 .../service/ClientAMProtocolPBServiceImpl.java  |    12 +
 .../ComponentHealthThresholdMonitor.java        |   151 +
 .../yarn/service/monitor/ServiceMonitor.java    |     7 +-
 .../service/monitor/probe/DefaultProbe.java     |    99 +
 .../yarn/service/monitor/probe/HttpProbe.java   |    37 +-
 .../yarn/service/monitor/probe/MonitorKeys.java |    12 +
 .../service/monitor/probe/MonitorUtils.java     |    14 +-
 .../yarn/service/monitor/probe/PortProbe.java   |    24 +-
 .../yarn/service/monitor/probe/Probe.java       |    18 +-
 .../provider/AbstractClientProvider.java        |    23 +-
 .../provider/AbstractProviderService.java       |    40 +-
 .../yarn/service/provider/ProviderService.java  |     7 +-
 .../yarn/service/provider/ProviderUtils.java    |   118 +-
 .../provider/docker/DockerProviderService.java  |    17 +-
 .../yarn/service/utils/CoreFileSystem.java      |     7 +
 .../yarn/service/utils/ServiceApiUtil.java      |    70 +-
 .../service/utils/ServiceRegistryUtils.java     |    60 +
 .../src/main/proto/ClientAMProtocol.proto       |    11 +
 .../hadoop/yarn/service/MockServiceAM.java      |    25 +
 .../hadoop/yarn/service/ServiceTestUtils.java   |    26 +-
 .../hadoop/yarn/service/TestServiceAM.java      |    55 +
 .../hadoop/yarn/service/TestServiceManager.java |   122 +-
 .../yarn/service/TestYarnNativeServices.java    |   268 +-
 .../yarn/service/client/TestServiceCLI.java     |   192 +-
 .../yarn/service/client/TestServiceClient.java  |   205 +-
 .../yarn/service/component/TestComponent.java   |   265 +
 .../instance/TestComponentInstance.java         |    88 +
 .../service/monitor/TestServiceMonitor.java     |     1 +
 .../service/monitor/probe/TestDefaultProbe.java |   155 +
 .../service/provider/TestProviderUtils.java     |   164 +
 .../providers/TestAbstractClientProvider.java   |    44 +
 .../hadoop/yarn/client/api/AppAdminClient.java  |    27 +-
 .../yarn/client/api/impl/AMRMClientImpl.java    |     1 -
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |    65 +
 .../hadoop/yarn/client/ProtocolHATestBase.java  |     2 +-
 .../yarn/client/api/impl/TestAHSClient.java     |     8 +-
 .../yarn/client/api/impl/TestYarnClient.java    |     8 +-
 .../hadoop/yarn/client/cli/TestYarnCLI.java     |    63 +-
 .../impl/pb/ApplicationReportPBImpl.java        |    11 +
 .../PlacementConstraintTransformations.java     |     4 +-
 .../yarn/client/api/TimelineV2Client.java       |    47 +-
 .../client/api/impl/TimelineClientImpl.java     |    23 +-
 .../client/api/impl/TimelineV2ClientImpl.java   |    30 +-
 .../java/org/apache/hadoop/yarn/util/Apps.java  |   115 +-
 .../yarn/util/DockerClientConfigHandler.java    |    19 +-
 .../apache/hadoop/yarn/util/RackResolver.java   |    49 +-
 .../yarn/util/timeline/TimelineUtils.java       |     3 +-
 .../src/main/resources/yarn-default.xml         |    17 +-
 .../hadoop/yarn/api/TestApplicatonReport.java   |     2 +-
 .../org/apache/hadoop/yarn/util/TestApps.java   |   136 +
 .../hadoop/registry/server/dns/RegistryDNS.java |     3 +
 .../ApplicationHistoryManagerImpl.java          |     2 +-
 .../TestFileSystemApplicationHistoryStore.java  |    18 +-
 .../http/RMAuthenticationFilterInitializer.java |    51 +-
 .../server/service/SystemServiceManager.java    |    25 +
 .../yarn/server/service/package-info.java       |    27 +
 ...TimelineAuthenticationFilterInitializer.java |    47 +-
 .../hadoop/yarn/server/utils/BuilderUtils.java  |     3 +-
 .../hadoop/yarn/server/webapp/AppBlock.java     |     7 +-
 .../hadoop/yarn/server/webapp/AppsBlock.java    |     5 +-
 .../hadoop/yarn/server/webapp/WebPageUtils.java |     2 +-
 .../hadoop/yarn/server/webapp/dao/AppInfo.java  |     6 +
 .../server/nodemanager/ContainerExecutor.java   |    11 +
 .../nodemanager/DefaultContainerExecutor.java   |     6 +
 .../server/nodemanager/DirectoryCollection.java |    39 +-
 .../nodemanager/LinuxContainerExecutor.java     |   132 +-
 .../nodemanager/amrmproxy/AMRMProxyService.java |     2 +-
 .../containermanager/AuxServices.java           |   175 +-
 .../containermanager/ContainerManagerImpl.java  |     3 +-
 .../container/ContainerImpl.java                |     3 +-
 .../launcher/ContainerLaunch.java               |    57 +-
 .../launcher/ContainerRelaunch.java             |    38 +-
 .../linux/privileged/PrivilegedOperation.java   |     4 +-
 .../runtime/DefaultLinuxContainerRuntime.java   |    11 +-
 .../DelegatingLinuxContainerRuntime.java        |     9 +
 .../runtime/DockerLinuxContainerRuntime.java    |    97 +-
 .../JavaSandboxLinuxContainerRuntime.java       |    10 +
 .../linux/runtime/docker/DockerClient.java      |    55 +
 .../linux/runtime/docker/DockerCommand.java     |    32 +
 .../runtime/docker/DockerCommandExecutor.java   |    38 +-
 .../runtime/docker/DockerInspectCommand.java    |    19 +
 .../linux/runtime/docker/DockerRmCommand.java   |    16 +
 .../runtime/docker/DockerStartCommand.java      |    29 +
 .../localizer/ResourceLocalizationService.java  |    20 +
 .../runtime/ContainerRuntime.java               |    10 +
 .../scheduler/ContainerScheduler.java           |     7 +-
 .../recovery/NMLeveldbStateStoreService.java    |    16 +-
 .../timelineservice/NMTimelinePublisher.java    |    15 +-
 .../impl/container-executor.c                   |    46 +-
 .../impl/container-executor.h                   |    10 +-
 .../main/native/container-executor/impl/main.c  |    47 +-
 .../container-executor/impl/utils/docker-util.c |   141 +-
 .../container-executor/impl/utils/docker-util.h |    11 +
 .../test/utils/test_docker_util.cc              |   117 +-
 .../nodemanager/TestLinuxContainerExecutor.java |    12 +
 .../amrmproxy/BaseAMRMProxyTest.java            |     5 +
 .../amrmproxy/TestAMRMProxyService.java         |    42 +
 .../containermanager/TestAuxServices.java       |   167 +-
 .../TestContainerManagerRecovery.java           |    51 +
 .../launcher/TestContainerRelaunch.java         |     2 +-
 .../launcher/TestContainersLauncher.java        |     2 +-
 .../runtime/TestDockerContainerRuntime.java     |   325 +-
 .../docker/TestDockerCommandExecutor.java       |    94 +-
 .../runtime/docker/TestDockerStartCommand.java  |    53 +
 .../TestResourceLocalizationService.java        |   125 +
 .../TestContainersMonitorResourceChange.java    |     5 +
 .../TestNMLeveldbStateStoreService.java         |    16 +
 .../TestNMTimelinePublisher.java                |     1 -
 .../server/resourcemanager/RMServerUtils.java   |     5 +-
 .../server/resourcemanager/ResourceManager.java |    56 +-
 .../resourcemanager/amlauncher/AMLauncher.java  |    23 +-
 .../metrics/TimelineServiceV1Publisher.java     |     2 +
 .../metrics/TimelineServiceV2Publisher.java     |     2 +
 .../recovery/FileSystemRMStateStore.java        |     4 +-
 .../recovery/LeveldbRMStateStore.java           |    14 +-
 .../recovery/MemoryRMStateStore.java            |     2 +-
 .../resourcemanager/recovery/RMStateStore.java  |    18 +-
 .../recovery/ZKRMStateStore.java                |     4 +-
 .../recovery/records/ApplicationStateData.java  |    24 +-
 .../impl/pb/ApplicationStateDataPBImpl.java     |    13 +
 .../AbstractSchedulerPlanFollower.java          |     5 +-
 .../server/resourcemanager/rmapp/RMApp.java     |    10 +-
 .../resourcemanager/rmapp/RMAppEvent.java       |     6 +
 .../resourcemanager/rmapp/RMAppEventType.java   |     1 +
 .../server/resourcemanager/rmapp/RMAppImpl.java |    41 +-
 .../rmapp/attempt/RMAppAttemptEvent.java        |     7 +
 .../rmapp/attempt/RMAppAttemptImpl.java         |    51 +-
 .../rmapp/attempt/RMAppAttemptMetrics.java      |     9 +-
 .../rmcontainer/RMContainerImpl.java            |    30 +-
 .../resourcemanager/scheduler/Allocation.java   |    12 +
 .../scheduler/SchedulerApplicationAttempt.java  |    13 +-
 .../scheduler/SchedulerUtils.java               |    33 +-
 .../capacity/AutoCreatedLeafQueue.java          |     3 +-
 .../AutoCreatedQueueManagementPolicy.java       |    12 +-
 .../scheduler/capacity/CapacityScheduler.java   |    11 +-
 .../CapacitySchedulerConfiguration.java         |    36 +
 .../scheduler/capacity/LeafQueue.java           |    11 +
 .../scheduler/capacity/ManagedParentQueue.java  |     5 +-
 .../allocator/AbstractContainerAllocator.java   |     9 +-
 .../allocator/RegularContainerAllocator.java    |    42 +-
 .../GuaranteedOrZeroCapacityOverTimePolicy.java |   573 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |    10 +
 .../constraint/PlacementConstraintsUtil.java    |     8 +-
 .../scheduler/fair/AllocationConfiguration.java |    20 -
 .../placement/PendingAskUpdateResult.java       |     8 +
 .../security/DelegationTokenRenewer.java        |     4 +
 .../webapp/FairSchedulerAppsBlock.java          |     2 +
 .../resourcemanager/webapp/NodesPage.java       |     2 +-
 .../resourcemanager/webapp/RMAppsBlock.java     |     2 +
 .../resourcemanager/webapp/RMWebServices.java   |    13 +-
 .../resourcemanager/webapp/dao/AppInfo.java     |     6 +
 .../yarn_server_resourcemanager_recovery.proto  |     1 +
 .../yarn/server/resourcemanager/MockAM.java     |     2 +-
 .../yarn/server/resourcemanager/MockNM.java     |    15 +
 .../server/resourcemanager/TestAppManager.java  |    20 +-
 .../TestApplicationMasterLauncher.java          |    80 +-
 .../resourcemanager/TestClientRMService.java    |    14 +-
 .../resourcemanager/TestRMAdminService.java     |     2 +
 .../applicationsmanager/MockAsm.java            |    15 +-
 .../TestCombinedSystemMetricsPublisher.java     |    26 +
 .../metrics/TestSystemMetricsPublisher.java     |     1 +
 .../TestSystemMetricsPublisherForV2.java        |     1 +
 .../TestPreemptionForQueueWithPriorities.java   |   147 +
 ...pacityPreemptionPolicyInterQueueWithDRF.java |    68 +
 .../TestUserGroupMappingPlacementRule.java      |     0
 .../recovery/RMStateStoreTestBase.java          |    18 +-
 .../recovery/TestFSRMStateStore.java            |     3 +-
 .../recovery/TestLeveldbRMStateStore.java       |     1 +
 .../recovery/TestZKRMStateStore.java            |     3 +-
 .../server/resourcemanager/rmapp/MockRMApp.java |     6 +
 .../rmapp/TestRMAppTransitions.java             |     2 +-
 .../TestCapacitySchedulerAsyncScheduling.java   |    91 +
 ...stCapacitySchedulerAutoCreatedQueueBase.java |   241 +-
 .../TestCapacitySchedulerAutoQueueCreation.java |   233 +-
 ...TestCapacitySchedulerSurgicalPreemption.java |   150 +
 .../TestQueueManagementDynamicEditPolicy.java   |    30 +-
 .../TestPlacementConstraintsUtil.java           |    88 +
 .../fair/TestAllocationFileLoaderService.java   |    35 -
 ...stSingleConstraintAppPlacementAllocator.java |    36 +-
 .../security/TestDelegationTokenRenewer.java    |    19 +
 .../security/TestRMAuthenticationFilter.java    |    81 +
 .../resourcemanager/webapp/TestRMWebApp.java    |     2 +-
 .../webapp/TestRMWebServicesApps.java           |    14 +-
 .../webapp/FederationInterceptorREST.java       |    52 +-
 .../MockDefaultRequestInterceptorREST.java      |    16 +
 .../webapp/TestFederationInterceptorREST.java   |    56 +
 ...stTimelineReaderWebServicesHBaseStorage.java |     7 +-
 .../TestHBaseTimelineStorageEntities.java       |     3 +-
 .../storage/HBaseTimelineWriterImpl.java        |     3 +-
 .../storage/TimelineSchemaCreator.java          |    10 +
 .../storage/domain/DomainTableRW.java           |    92 +
 .../storage/domain/package-info.java            |    28 +
 .../storage/domain/DomainColumn.java            |   111 +
 .../storage/domain/DomainColumnFamily.java      |    52 +
 .../storage/domain/DomainRowKey.java            |   179 +
 .../storage/domain/DomainTable.java             |    45 +
 .../storage/domain/package-info.java            |    28 +
 .../storage/common/TestRowKeys.java             |    32 +
 .../collector/TimelineCollectorWebService.java  |    19 +-
 .../reader/TimelineParserForCompareExpr.java    |     7 +-
 .../reader/TimelineParserForEqualityExpr.java   |     7 +-
 .../reader/TimelineReaderServer.java            |     2 +-
 ...TestPerNodeTimelineCollectorsAuxService.java |     2 +
 .../TestTimelineReaderWebServicesUtils.java     |    25 +
 .../server/webproxy/amfilter/AmIpFilter.java    |    13 +-
 .../server/webproxy/amfilter/TestAmFilter.java  |    40 +
 .../src/site/markdown/CapacityScheduler.md      |    18 +-
 .../src/site/markdown/NodeManager.md            |    49 +-
 .../site/markdown/PlacementConstraints.md.vm    |    82 +-
 .../src/site/markdown/TimelineServiceV2.md      |     2 +-
 .../src/site/markdown/yarn-service/Concepts.md  |     6 +-
 .../markdown/yarn-service/Configurations.md     |   131 +-
 .../src/site/markdown/yarn-service/Overview.md  |    14 +-
 .../site/markdown/yarn-service/QuickStart.md    |    55 +-
 .../site/markdown/yarn-service/RegistryDNS.md   |     4 +-
 .../markdown/yarn-service/ServiceDiscovery.md   |     4 +-
 .../markdown/yarn-service/SystemServices.md     |    66 +
 .../markdown/yarn-service/YarnServiceAPI.md     |   117 +-
 .../main/webapp/app/components/nodes-heatmap.js |     4 +-
 .../main/webapp/app/components/timeline-view.js |    45 +-
 .../webapp/app/controllers/yarn-app-attempt.js  |     9 +-
 .../webapp/app/controllers/yarn-app/attempts.js |    11 +-
 .../app/controllers/yarn-component-instance.js  |     3 +-
 .../app/controllers/yarn-component-instances.js |     3 +-
 .../webapp/app/controllers/yarn-nodes/table.js  |     8 +-
 .../app/controllers/yarn-tools/yarn-conf.js     |    10 +-
 .../src/main/webapp/app/helpers/node-name.js    |    46 -
 .../src/main/webapp/app/initializers/loader.js  |     2 +
 .../main/webapp/app/models/yarn-app-attempt.js  |     5 +
 .../app/models/yarn-queue/capacity-queue.js     |     2 +-
 .../templates/components/app-attempt-table.hbs  |     2 +-
 .../app/templates/components/timeline-view.hbs  |    12 +-
 .../webapp/app/templates/yarn-app-attempt.hbs   |     2 +
 .../webapp/app/templates/yarn-app/attempts.hbs  |     2 +
 .../src/main/webapp/app/templates/yarn-node.hbs |    25 +
 .../webapp/app/templates/yarn-node/info.hbs     |     2 -
 .../app/templates/yarn-node/yarn-nm-gpu.hbs     |     2 -
 .../app/templates/yarn-tools/yarn-conf.hbs      |     6 +-
 .../src/main/webapp/tests/helpers/resolver.js   |     2 +-
 .../src/main/webapp/tests/index.html            |     1 +
 .../components/breadcrumb-bar-test.js           |    31 +-
 .../unit/adapters/yarn-app-attempt-test.js      |    59 +
 .../webapp/tests/unit/adapters/yarn-app-test.js |    36 +-
 .../unit/adapters/yarn-container-log-test.js    |    18 +-
 .../tests/unit/adapters/yarn-container-test.js  |    38 +
 .../tests/unit/adapters/yarn-node-app-test.js   |    42 +-
 .../unit/adapters/yarn-node-container-test.js   |    40 +-
 .../tests/unit/adapters/yarn-node-test.js       |    15 +-
 .../tests/unit/adapters/yarn-rm-node-test.js    |    13 +-
 .../tests/unit/controllers/yarn-app-test.js     |    13 +-
 .../webapp/tests/unit/helpers/node-name-test.js |    28 -
 .../webapp/tests/unit/initializers/env-test.js  |     5 +-
 .../tests/unit/initializers/hosts-test.js       |     5 +-
 .../tests/unit/initializers/jquery-test.js      |    41 -
 .../tests/unit/models/cluster-info-test.js      |    37 +
 .../tests/unit/models/cluster-metric-test.js    |    81 +
 .../tests/unit/models/yarn-app-attempt-test.js  |    65 +
 .../webapp/tests/unit/models/yarn-app-test.js   |    61 +-
 .../unit/models/yarn-container-log-test.js      |    18 -
 .../tests/unit/models/yarn-container-test.js    |    49 +
 .../tests/unit/models/yarn-node-app-test.js     |    14 +-
 .../unit/models/yarn-node-container-test.js     |    16 +-
 .../webapp/tests/unit/models/yarn-node-test.js  |    18 -
 .../webapp/tests/unit/models/yarn-queue-test.js |   122 +
 .../tests/unit/models/yarn-rm-node-test.js      |    14 +-
 .../webapp/tests/unit/models/yarn-user-test.js  |    32 +
 .../tests/unit/routes/cluster-overview-test.js  |   143 +
 .../tests/unit/routes/yarn-app-attempt-test.js  |    30 +
 .../webapp/tests/unit/routes/yarn-app-test.js   |   152 +
 .../webapp/tests/unit/routes/yarn-apps-test.js  |   113 +-
 .../webapp/tests/unit/routes/yarn-node-test.js  |    19 +-
 .../webapp/tests/unit/routes/yarn-nodes-test.js |    13 +-
 .../tests/unit/routes/yarn-queues-test.js       |   286 +-
 .../tests/unit/serializers/cluster-info-test.js |    70 +
 .../unit/serializers/cluster-metric-test.js     |    90 +
 .../unit/serializers/yarn-app-attempt-test.js   |   101 +
 .../tests/unit/serializers/yarn-app-test.js     |    62 +-
 .../unit/serializers/yarn-container-test.js     |    30 +
 .../unit/serializers/yarn-node-app-test.js      |    13 +-
 .../serializers/yarn-node-container-test.js     |    13 +-
 .../tests/unit/serializers/yarn-rm-node-test.js |    14 +-
 pom.xml                                         |    99 +
 1416 files changed, 167327 insertions(+), 4681 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a38fde5d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a38fde5d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
----------------------------------------------------------------------
diff --cc hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 4b03990,70fde60..2734a95
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@@ -416,12 -416,12 +416,13 @@@ public class ProtobufRpcEngine implemen
          Configuration conf, String bindAddress, int port, int numHandlers,
          int numReaders, int queueSizePerHandler, boolean verbose,
          SecretManager<? extends TokenIdentifier> secretManager, 
 -        String portRangeConfig)
 +        String portRangeConfig, AlignmentContext alignmentContext)
          throws IOException {
        super(bindAddress, port, null, numHandlers,
-           numReaders, queueSizePerHandler, conf, classNameBase(protocolImpl
-               .getClass().getName()), secretManager, portRangeConfig);
+           numReaders, queueSizePerHandler, conf,
+           serverNameFromClass(protocolImpl.getClass()), secretManager,
+           portRangeConfig);
 +      setAlignmentContext(alignmentContext);
        this.verbose = verbose;  
        registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass,
            protocolImpl);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a38fde5d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a38fde5d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --cc hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index ff4a63a,76d9c40..ab7eccc
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@@ -140,8 -140,11 +140,12 @@@ public abstract class Server 
    private RpcSaslProto negotiateResponse;
    private ExceptionsHandler exceptionsHandler = new ExceptionsHandler();
    private Tracer tracer;
 +  private AlignmentContext alignmentContext;
-   
+   /**
+    * Logical name of the server used in metrics and monitor.
+    */
+   private final String serverName;
 -  
++
    /**
     * Add exception classes for which server won't log stack traces.
     *




[05/50] [abbrv] hadoop git commit: YARN-8210. AMRMClient logging on every heartbeat to track updates of the AM RM token causes too many log lines to be generated in AM logs. (Suma Shivaprasad via wangda)

Posted by xk...@apache.org.
YARN-8210. AMRMClient logging on every heartbeat to track updates of the AM RM token causes too many log lines to be generated in AM logs. (Suma Shivaprasad via wangda)

Change-Id: I70edd6e301fd5e78d479e1882aedc9332a0827aa


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1833d9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1833d9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1833d9b

Branch: refs/heads/HDFS-12943
Commit: b1833d9ba2c078582161da45ac392dd5c361dcdf
Parents: ef3ecc3
Author: Wangda Tan <wa...@apache.org>
Authored: Fri Apr 27 13:07:38 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Fri Apr 27 13:07:38 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1833d9b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
index a8e4dfc..ef849b2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
@@ -1111,7 +1111,6 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
     // to ensure we replace the previous token setup by the RM.
     // Afterwards we can update the service address for the RPC layer.
     UserGroupInformation currentUGI = UserGroupInformation.getCurrentUser();
-    LOG.info("Updating with new AMRMToken");
     currentUGI.addToken(amrmToken);
     amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig()));
   }




[30/50] [abbrv] hadoop git commit: YARN-8222. Fix potential NPE when getting RMApp from RM context. Contributed by Tao Yang.

Posted by xk...@apache.org.
YARN-8222. Fix potential NPE when getting RMApp from RM context. Contributed by Tao Yang.
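
The NPE comes from a chained lookup on the RM context: once an application finishes and is removed from getRMApps(), the map returns null and the chained getCurrentAppAttempt() call throws. A minimal, generic sketch of the guard pattern the patch applies, using plain JDK types as stand-ins rather than the actual RM classes:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class GuardedLookup {
      public static void main(String[] args) {
        // Stand-in for rmContext.getRMApps(); entries may be removed concurrently.
        Map<String, String> apps = new ConcurrentHashMap<>();

        // The chained form apps.get("app_0001").length() would NPE once the
        // entry is gone, so look the value up and guard it first.
        String app = apps.get("app_0001");
        if (app != null) {
          System.out.println(app.length());
        } else {
          System.out.println("app already removed; skip the metrics update");
        }
      }
    }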


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/251f5288
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/251f5288
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/251f5288

Branch: refs/heads/HDFS-12943
Commit: 251f528814c4a4647cac0af6effb9a73135db180
Parents: 3265b55
Author: Weiwei Yang <ww...@apache.org>
Authored: Wed May 2 17:54:46 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Wed May 2 17:54:46 2018 +0800

----------------------------------------------------------------------
 .../rmcontainer/RMContainerImpl.java            | 30 +++++++++++---------
 .../scheduler/SchedulerApplicationAttempt.java  | 13 +++++----
 2 files changed, 23 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/251f5288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 541621b..b5c8e7c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRunningOnNodeEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
@@ -737,21 +738,22 @@ public class RMContainerImpl implements RMContainer {
 
     private static void updateAttemptMetrics(RMContainerImpl container) {
       Resource resource = container.getContainer().getResource();
-      RMAppAttempt rmAttempt = container.rmContext.getRMApps()
-          .get(container.getApplicationAttemptId().getApplicationId())
-          .getCurrentAppAttempt();
-
-      if (rmAttempt != null) {
-        long usedMillis = container.finishTime - container.creationTime;
-        rmAttempt.getRMAppAttemptMetrics()
-            .updateAggregateAppResourceUsage(resource, usedMillis);
-        // If this is a preempted container, update preemption metrics
-        if (ContainerExitStatus.PREEMPTED == container.finishedStatus
-            .getExitStatus()) {
+      RMApp app = container.rmContext.getRMApps()
+          .get(container.getApplicationAttemptId().getApplicationId());
+      if (app != null) {
+        RMAppAttempt rmAttempt = app.getCurrentAppAttempt();
+        if (rmAttempt != null) {
+          long usedMillis = container.finishTime - container.creationTime;
           rmAttempt.getRMAppAttemptMetrics()
-              .updatePreemptionInfo(resource, container);
-          rmAttempt.getRMAppAttemptMetrics()
-              .updateAggregatePreemptedAppResourceUsage(resource, usedMillis);
+              .updateAggregateAppResourceUsage(resource, usedMillis);
+          // If this is a preempted container, update preemption metrics
+          if (ContainerExitStatus.PREEMPTED == container.finishedStatus
+              .getExitStatus()) {
+            rmAttempt.getRMAppAttemptMetrics()
+                .updatePreemptionInfo(resource, container);
+            rmAttempt.getRMAppAttemptMetrics()
+                .updateAggregatePreemptedAppResourceUsage(resource, usedMillis);
+          }
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/251f5288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 857e736..005569c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -1241,12 +1241,13 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
       return;
     }
 
-    RMAppAttempt attempt =
-        rmContext.getRMApps().get(attemptId.getApplicationId())
-          .getCurrentAppAttempt();
-    if (attempt != null) {
-      attempt.getRMAppAttemptMetrics().incNumAllocatedContainers(containerType,
-        requestType);
+    RMApp app = rmContext.getRMApps().get(attemptId.getApplicationId());
+    if (app != null) {
+      RMAppAttempt attempt = app.getCurrentAppAttempt();
+      if (attempt != null) {
+        attempt.getRMAppAttemptMetrics()
+            .incNumAllocatedContainers(containerType, requestType);
+      }
     }
   }
 




[15/50] [abbrv] hadoop git commit: YARN-8228. Added hostname length check for docker container. Contributed by Shane Kumpf

Posted by xk...@apache.org.
YARN-8228.  Added hostname length check for docker container.
            Contributed by Shane Kumpf
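
Linux caps hostnames at 64 characters (HOST_NAME_MAX) and Docker rejects longer hostnames when the container starts, so the runtime now fails fast with a clear message instead. A small self-contained sketch of that kind of check; the 64-character limit and the sample strings mirror the patch, but the class and method names here are illustrative, not the actual DockerLinuxContainerRuntime API:

    public class HostnameCheck {
      // Same limit the patch enforces; Linux HOST_NAME_MAX is 64.
      private static final int MAX_HOSTNAME_LENGTH = 64;

      static void validateHostnameLength(String hostname) {
        if (hostname != null && hostname.length() > MAX_HOSTNAME_LENGTH) {
          throw new IllegalArgumentException("Hostname can not be greater than "
              + MAX_HOSTNAME_LENGTH + " characters: " + hostname);
        }
      }

      public static void main(String[] args) {
        validateHostnameLength("example.test.site");   // ok, well under 64
        validateHostnameLength(                        // 66 characters, throws
            "exampleexampleexampleexampleexampleexampleexampleexample.test.site");
      }
    }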


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a966ec6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a966ec6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a966ec6e

Branch: refs/heads/HDFS-12943
Commit: a966ec6e23b3ac8e233b2cf9b9ddaa6628a8c996
Parents: 919865a
Author: Eric Yang <ey...@apache.org>
Authored: Mon Apr 30 19:12:53 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Mon Apr 30 19:12:53 2018 -0400

----------------------------------------------------------------------
 .../linux/runtime/DockerLinuxContainerRuntime.java     |  6 ++++++
 .../linux/runtime/TestDockerContainerRuntime.java      | 13 +++++++++++++
 2 files changed, 19 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a966ec6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 999b343..9c05c59 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -199,6 +199,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       HOSTNAME_PATTERN);
   private static final Pattern USER_MOUNT_PATTERN = Pattern.compile(
       "(?<=^|,)([^:\\x00]+):([^:\\x00]+):([a-z]+)");
+  private static final int HOST_NAME_LENGTH = 64;
 
   @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_IMAGE =
@@ -541,6 +542,11 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
         throw new ContainerExecutionException("Hostname '" + hostname
             + "' doesn't match docker hostname pattern");
       }
+      if (hostname.length() > HOST_NAME_LENGTH) {
+        throw new ContainerExecutionException(
+            "Hostname can not be greater than " + HOST_NAME_LENGTH
+                + " characters: " + hostname);
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a966ec6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index a333bac..6ad35b2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -1539,6 +1539,19 @@ public class TestDockerContainerRuntime {
     }
   }
 
+  @Test
+  public void testValidDockerHostnameLength() throws Exception {
+    String validLength = "example.test.site";
+    DockerLinuxContainerRuntime.validateHostname(validLength);
+  }
+
+  @Test(expected = ContainerExecutionException.class)
+  public void testInvalidDockerHostnameLength() throws Exception {
+    String invalidLength =
+        "exampleexampleexampleexampleexampleexampleexampleexample.test.site";
+    DockerLinuxContainerRuntime.validateHostname(invalidLength);
+  }
+
   @SuppressWarnings("unchecked")
   private void checkVolumeCreateCommand()
       throws PrivilegedOperationException, IOException {




[43/50] [abbrv] hadoop git commit: HDFS-13525. RBF: Add unit test TestStateStoreDisabledNameservice. Contributed by Yiqun Lin.

Posted by xk...@apache.org.
HDFS-13525. RBF: Add unit test TestStateStoreDisabledNameservice. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3b416f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3b416f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3b416f6

Branch: refs/heads/HDFS-12943
Commit: a3b416f69dc3965f247603f657df33bd74fd723e
Parents: 7698737
Author: Inigo Goiri <in...@apache.org>
Authored: Thu May 3 11:24:57 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Thu May 3 11:24:57 2018 -0700

----------------------------------------------------------------------
 .../TestStateStoreDisabledNameservice.java      | 71 ++++++++++++++++++++
 1 file changed, 71 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3b416f6/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreDisabledNameservice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreDisabledNameservice.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreDisabledNameservice.java
new file mode 100644
index 0000000..353510a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreDisabledNameservice.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store;
+
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.clearRecords;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.hadoop.hdfs.server.federation.store.records.DisabledNameservice;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test the basic {@link StateStoreService}
+ * {@link DisabledNameserviceStore} functionality.
+ */
+public class TestStateStoreDisabledNameservice extends TestStateStoreBase {
+
+  private static DisabledNameserviceStore disabledStore;
+
+  @Before
+  public void setup() throws IOException, InterruptedException {
+    disabledStore = getStateStore()
+        .getRegisteredRecordStore(DisabledNameserviceStore.class);
+    // Clear disabled nameservice registrations
+    assertTrue(clearRecords(getStateStore(), DisabledNameservice.class));
+  }
+
+  @Test
+  public void testDisableNameservice() throws IOException {
+    // no nameservices disabled firstly
+    Set<String> disabledNameservices = disabledStore.getDisabledNameservices();
+    assertEquals(0, disabledNameservices.size());
+
+    // disable two nameservices
+    disabledStore.disableNameservice("ns0");
+    disabledStore.disableNameservice("ns1");
+    disabledStore.loadCache(true);
+    // verify if the nameservices are disabled
+    disabledNameservices = disabledStore.getDisabledNameservices();
+    assertEquals(2, disabledNameservices.size());
+    assertTrue(disabledNameservices.contains("ns0")
+        && disabledNameservices.contains("ns1"));
+
+    // enable one nameservice
+    disabledStore.enableNameservice("ns0");
+    disabledStore.loadCache(true);
+    // verify the disabled nameservice again
+    disabledNameservices = disabledStore.getDisabledNameservices();
+    assertEquals(1, disabledNameservices.size());
+    assertTrue(disabledNameservices.contains("ns1"));
+  }
+}
\ No newline at end of file




[08/50] [abbrv] hadoop git commit: HDFS-13508. RBF: Normalize paths (automatically) when adding, updating, removing or listing mount table entries. Contributed by Ekanth S.

Posted by xk...@apache.org.
HDFS-13508. RBF: Normalize paths (automatically) when adding, updating, removing or listing mount table entries. Contributed by Ekanth S.
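
The normalization is a round-trip through org.apache.hadoop.fs.Path, whose constructor strips trailing slashes and collapses duplicate separators, so "/foo" and "/foo/" map to the same mount table key. A short sketch of that behavior (assumes hadoop-common on the classpath; the sample paths are only illustrations):

    import org.apache.hadoop.fs.Path;

    public class NormalizeMountPath {
      public static void main(String[] args) {
        // Path normalization is what RouterAdmin now relies on for mount keys.
        System.out.println(new Path("/test-addmounttable-notnormalized/"));
        // -> /test-addmounttable-notnormalized (trailing slash removed)
        System.out.println(new Path("/tmp//dir/"));
        // -> /tmp/dir (duplicate separator collapsed, trailing slash removed)
      }
    }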


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48444060
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48444060
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48444060

Branch: refs/heads/HDFS-12943
Commit: 484440602c5b69fbd8106010603c61ae051056dd
Parents: f469628
Author: Inigo Goiri <in...@apache.org>
Authored: Fri Apr 27 16:28:17 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri Apr 27 16:28:17 2018 -0700

----------------------------------------------------------------------
 .../hdfs/tools/federation/RouterAdmin.java      |  16 +++
 .../federation/router/TestRouterAdminCLI.java   | 117 ++++++++++++++++++-
 2 files changed, 130 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48444060/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index 17707dc..b0a2062 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -26,6 +26,7 @@ import java.util.Map;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -322,6 +323,7 @@ public class RouterAdmin extends Configured implements Tool {
   public boolean addMount(String mount, String[] nss, String dest,
       boolean readonly, DestinationOrder order, ACLEntity aclInfo)
       throws IOException {
+    mount = normalizeFileSystemPath(mount);
     // Get the existing entry
     MountTableManager mountTable = client.getMountTableManager();
     GetMountTableEntriesRequest getRequest =
@@ -473,6 +475,7 @@ public class RouterAdmin extends Configured implements Tool {
   public boolean updateMount(String mount, String[] nss, String dest,
       boolean readonly, DestinationOrder order, ACLEntity aclInfo)
       throws IOException {
+    mount = normalizeFileSystemPath(mount);
     MountTableManager mountTable = client.getMountTableManager();
 
     // Create a new entry
@@ -519,6 +522,7 @@ public class RouterAdmin extends Configured implements Tool {
    * @throws IOException If it cannot be removed.
    */
   public boolean removeMount(String path) throws IOException {
+    path = normalizeFileSystemPath(path);
     MountTableManager mountTable = client.getMountTableManager();
     RemoveMountTableEntryRequest request =
         RemoveMountTableEntryRequest.newInstance(path);
@@ -538,6 +542,7 @@ public class RouterAdmin extends Configured implements Tool {
    * @throws IOException If it cannot be listed.
    */
   public void listMounts(String path) throws IOException {
+    path = normalizeFileSystemPath(path);
     MountTableManager mountTable = client.getMountTableManager();
     GetMountTableEntriesRequest request =
         GetMountTableEntriesRequest.newInstance(path);
@@ -798,6 +803,17 @@ public class RouterAdmin extends Configured implements Tool {
   }
 
   /**
+   * Normalize a path for that filesystem.
+   *
+   * @param path Path to normalize.
+   * @return Normalized path.
+   */
+  private static String normalizeFileSystemPath(final String path) {
+    Path normalizedPath = new Path(path);
+    return normalizedPath.toString();
+  }
+
+  /**
    * Inner class that stores ACL info of mount table.
    */
   static class ACLEntity {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48444060/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index 4e84c33..2537c19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
@@ -159,6 +159,45 @@ public class TestRouterAdminCLI {
   }
 
   @Test
+  public void testAddMountTableNotNormalized() throws Exception {
+    String nsId = "ns0";
+    String src = "/test-addmounttable-notnormalized";
+    String srcWithSlash = src + "/";
+    String dest = "/addmounttable-notnormalized";
+    String[] argv = new String[] {"-add", srcWithSlash, nsId, dest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+        .newInstance(src);
+    GetMountTableEntriesResponse getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    MountTable mountTable = getResponse.getEntries().get(0);
+
+    List<RemoteLocation> destinations = mountTable.getDestinations();
+    assertEquals(1, destinations.size());
+
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(nsId, destinations.get(0).getNameserviceId());
+    assertEquals(dest, destinations.get(0).getDest());
+    assertFalse(mountTable.isReadOnly());
+
+    // test mount table update behavior
+    dest = dest + "-new";
+    argv = new String[] {"-add", srcWithSlash, nsId, dest, "-readonly"};
+    assertEquals(0, ToolRunner.run(admin, argv));
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+
+    getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    mountTable = getResponse.getEntries().get(0);
+    assertEquals(2, mountTable.getDestinations().size());
+    assertEquals(nsId, mountTable.getDestinations().get(1).getNameserviceId());
+    assertEquals(dest, mountTable.getDestinations().get(1).getDest());
+    assertTrue(mountTable.isReadOnly());
+  }
+
+  @Test
   public void testAddOrderMountTable() throws Exception {
     testAddOrderMountTable(DestinationOrder.HASH);
     testAddOrderMountTable(DestinationOrder.LOCAL);
@@ -192,6 +231,7 @@ public class TestRouterAdminCLI {
   public void testListMountTable() throws Exception {
     String nsId = "ns0";
     String src = "/test-lsmounttable";
+    String srcWithSlash = src + "/";
     String dest = "/lsmounttable";
     String[] argv = new String[] {"-add", src, nsId, dest};
     assertEquals(0, ToolRunner.run(admin, argv));
@@ -203,6 +243,11 @@ public class TestRouterAdminCLI {
     assertEquals(0, ToolRunner.run(admin, argv));
     assertTrue(out.toString().contains(src));
 
+    // Test with not-normalized src input
+    argv = new String[] {"-ls", srcWithSlash};
+    assertEquals(0, ToolRunner.run(admin, argv));
+    assertTrue(out.toString().contains(src));
+
     out.reset();
     GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
         .newInstance("/");
@@ -256,6 +301,33 @@ public class TestRouterAdminCLI {
   }
 
   @Test
+  public void testRemoveMountTableNotNormalized() throws Exception {
+    String nsId = "ns0";
+    String src = "/test-rmmounttable-notnormalized";
+    String srcWithSlash = src + "/";
+    String dest = "/rmmounttable-notnormalized";
+    String[] argv = new String[] {"-add", src, nsId, dest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+        .newInstance(src);
+    GetMountTableEntriesResponse getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    // ensure mount table added successfully
+    MountTable mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+
+    argv = new String[] {"-rm", srcWithSlash};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    assertEquals(0, getResponse.getEntries().size());
+  }
+
+  @Test
   public void testMountTableDefaultACL() throws Exception {
     String[] argv = new String[] {"-add", "/testpath0", "ns0", "/testdir0"};
     assertEquals(0, ToolRunner.run(admin, argv));
@@ -552,12 +624,12 @@ public class TestRouterAdminCLI {
   }
 
   @Test
-  public void testUpdateNameserviceDestinationForExistingMountTable() throws
+  public void testUpdateDestinationForExistingMountTable() throws
   Exception {
     // Add a mount table firstly
     String nsId = "ns0";
-    String src = "/test-updateNameserviceDestinationForExistingMountTable";
-    String dest = "/UpdateNameserviceDestinationForExistingMountTable";
+    String src = "/test-updateDestinationForExistingMountTable";
+    String dest = "/UpdateDestinationForExistingMountTable";
     String[] argv = new String[] {"-add", src, nsId, dest};
     assertEquals(0, ToolRunner.run(admin, argv));
 
@@ -590,6 +662,45 @@ public class TestRouterAdminCLI {
   }
 
   @Test
+  public void testUpdateDestinationForExistingMountTableNotNormalized() throws
+      Exception {
+    // Add a mount table firstly
+    String nsId = "ns0";
+    String src = "/test-updateDestinationForExistingMountTableNotNormalized";
+    String srcWithSlash = src + "/";
+    String dest = "/UpdateDestinationForExistingMountTableNotNormalized";
+    String[] argv = new String[] {"-add", src, nsId, dest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest =
+        GetMountTableEntriesRequest.newInstance(src);
+    GetMountTableEntriesResponse getResponse =
+        client.getMountTableManager().getMountTableEntries(getRequest);
+    // Ensure mount table added successfully
+    MountTable mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(nsId, mountTable.getDestinations().get(0).getNameserviceId());
+    assertEquals(dest, mountTable.getDestinations().get(0).getDest());
+
+    // Update the destination
+    String newNsId = "ns1";
+    String newDest = "/newDestination";
+    argv = new String[] {"-update", srcWithSlash, newNsId, newDest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    // Ensure the destination updated successfully
+    mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(newNsId,
+        mountTable.getDestinations().get(0).getNameserviceId());
+    assertEquals(newDest, mountTable.getDestinations().get(0).getDest());
+  }
+
+  @Test
   public void testUpdateReadonlyUserGroupPermissionMountable()
       throws Exception {
     // Add a mount table




[27/50] [abbrv] hadoop git commit: HADOOP-15377. Improve debug messages in MetricsConfig.java

Posted by xk...@apache.org.
HADOOP-15377. Improve debug messages in MetricsConfig.java

Signed-off-by: Akira Ajisaka <aa...@apache.org>
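
Most of the change moves string concatenation in log statements to SLF4J's parameterized form, which only formats the message when the level is enabled, and keeps explicit isDebugEnabled() guards only where building the argument itself is expensive (for example toString(cf)). A minimal sketch of the difference with plain SLF4J (needs an SLF4J binding on the classpath to produce output; this is not the MetricsConfig code itself):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingStyle {
      private static final Logger LOG = LoggerFactory.getLogger(LoggingStyle.class);

      public static void main(String[] args) {
        String fname = "hadoop-metrics2.properties";

        // Concatenation builds the message string even when DEBUG is off:
        //   LOG.debug("Loaded properties from " + fname);

        // The parameterized form defers formatting until DEBUG is enabled:
        LOG.debug("Loaded properties from {}", fname);

        // Keep an explicit guard only when computing the argument is costly:
        if (LOG.isDebugEnabled()) {
          LOG.debug("Properties: {}", expensiveDump());
        }
      }

      private static String expensiveDump() {
        return "...";  // stand-in for something like toString(cf)
      }
    }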


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33768724
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33768724
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33768724

Branch: refs/heads/HDFS-12943
Commit: 33768724ff99d4966c24c9553eef207ed31a76d3
Parents: 1a95a45
Author: BELUGA BEHR <da...@gmail.com>
Authored: Wed May 2 17:09:22 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed May 2 17:09:22 2018 +0900

----------------------------------------------------------------------
 .../hadoop/metrics2/impl/MetricsConfig.java     | 50 ++++++++++++--------
 1 file changed, 30 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33768724/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
index ac4a24e..027450c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
@@ -118,20 +118,23 @@ class MetricsConfig extends SubsetConfiguration {
                 .setListDelimiterHandler(new DefaultListDelimiterHandler(',')))
               .getConfiguration()
               .interpolatedConfiguration();
-        LOG.info("loaded properties from "+ fname);
-        LOG.debug(toString(cf));
+        LOG.info("Loaded properties from {}", fname);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Properties: {}", toString(cf));
+        }
         MetricsConfig mc = new MetricsConfig(cf, prefix);
-        LOG.debug(mc.toString());
+        LOG.debug("Metrics Config: {}", mc);
         return mc;
       } catch (ConfigurationException e) {
         // Commons Configuration defines the message text when file not found
         if (e.getMessage().startsWith("Could not locate")) {
+          LOG.debug("Could not locate file {}", fname, e);
           continue;
         }
         throw new MetricsConfigException(e);
       }
     }
-    LOG.warn("Cannot locate configuration: tried "+
+    LOG.warn("Cannot locate configuration: tried " +
              Joiner.on(",").join(fileNames));
     // default to an empty configuration
     return new MetricsConfig(new PropertiesConfiguration(), prefix);
@@ -168,7 +171,6 @@ class MetricsConfig extends SubsetConfiguration {
 
   Iterable<String> keys() {
     return new Iterable<String>() {
-      @SuppressWarnings("unchecked")
       @Override
       public Iterator<String> iterator() {
         return (Iterator<String>) getKeys();
@@ -186,21 +188,21 @@ class MetricsConfig extends SubsetConfiguration {
     Object value = super.getPropertyInternal(key);
     if (value == null) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("poking parent '"+ getParent().getClass().getSimpleName() +
-                  "' for key: "+ key);
+        LOG.debug("poking parent '" + getParent().getClass().getSimpleName() +
+                  "' for key: " + key);
       }
       return getParent().getProperty(key.startsWith(PREFIX_DEFAULT) ? key
                                      : PREFIX_DEFAULT + key);
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("returning '"+ value +"' for key: "+ key);
-    }
+    LOG.debug("Returning '{}' for key: {}", value, key);
     return value;
   }
 
   <T extends MetricsPlugin> T getPlugin(String name) {
     String clsName = getClassName(name);
-    if (clsName == null) return null;
+    if (clsName == null) {
+      return null;
+    }
     try {
       Class<?> cls = Class.forName(clsName, true, getPluginLoader());
       @SuppressWarnings("unchecked")
@@ -213,9 +215,9 @@ class MetricsConfig extends SubsetConfiguration {
   }
 
   String getClassName(String prefix) {
-    String classKey = prefix.isEmpty() ? "class" : prefix +".class";
+    String classKey = prefix.isEmpty() ? "class" : prefix.concat(".class");
     String clsName = getString(classKey);
-    LOG.debug(clsName);
+    LOG.debug("Class name for prefix {} is {}", prefix, clsName);
     if (clsName == null || clsName.isEmpty()) {
       return null;
     }
@@ -223,25 +225,29 @@ class MetricsConfig extends SubsetConfiguration {
   }
 
   ClassLoader getPluginLoader() {
-    if (pluginLoader != null) return pluginLoader;
+    if (pluginLoader != null) {
+      return pluginLoader;
+    }
     final ClassLoader defaultLoader = getClass().getClassLoader();
     Object purls = super.getProperty(PLUGIN_URLS_KEY);
-    if (purls == null) return defaultLoader;
+    if (purls == null) {
+      return defaultLoader;
+    }
     Iterable<String> jars = SPLITTER.split((String) purls);
     int len = Iterables.size(jars);
-    if ( len > 0) {
+    if (len > 0) {
       final URL[] urls = new URL[len];
       try {
         int i = 0;
         for (String jar : jars) {
-          LOG.debug(jar);
+          LOG.debug("Parsing URL for {}", jar);
           urls[i++] = new URL(jar);
         }
       } catch (Exception e) {
         throw new MetricsConfigException(e);
       }
       if (LOG.isDebugEnabled()) {
-        LOG.debug("using plugin jars: "+ Iterables.toString(jars));
+        LOG.debug("Using plugin jars: {}", Iterables.toString(jars));
       }
       pluginLoader = doPrivileged(new PrivilegedAction<ClassLoader>() {
         @Override public ClassLoader run() {
@@ -259,9 +265,13 @@ class MetricsConfig extends SubsetConfiguration {
   MetricsFilter getFilter(String prefix) {
     // don't create filter instances without out options
     MetricsConfig conf = subset(prefix);
-    if (conf.isEmpty()) return null;
+    if (conf.isEmpty()) {
+      return null;
+    }
     MetricsFilter filter = getPlugin(prefix);
-    if (filter != null) return filter;
+    if (filter != null) {
+      return filter;
+    }
     // glob filter is assumed if pattern is specified but class is not.
     filter = new GlobFilter();
     filter.init(conf);




[25/50] [abbrv] hadoop git commit: HDFS-13488. RBF: Reject requests when a Router is overloaded. Contributed by Inigo Goiri.

Posted by xk...@apache.org.
HDFS-13488. RBF: Reject requests when a Router is overloaded. Contributed by Inigo Goiri.
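
With the new client.reject.overload router setting enabled, the Router's RPC client builds its executor on a bounded ArrayBlockingQueue instead of an unbounded LinkedBlockingQueue, so once all handler threads are busy and the queue is full, new submissions fail with RejectedExecutionException, which the Router surfaces as a StandbyException so the client fails over to another Router. A self-contained JDK sketch of that bounded-queue behavior (pool and queue sizes here are illustrative):

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.RejectedExecutionException;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class BoundedPoolDemo {
      public static void main(String[] args) {
        int numThreads = 2;
        // Bounded work queue: beyond numThreads running + numThreads queued,
        // further submissions are rejected instead of piling up.
        ThreadPoolExecutor executor = new ThreadPoolExecutor(numThreads, numThreads,
            0L, TimeUnit.MILLISECONDS, new ArrayBlockingQueue<>(numThreads));

        Runnable slowTask = () -> {
          try {
            Thread.sleep(1000);
          } catch (InterruptedException ignored) {
          }
        };

        try {
          for (int i = 0; i < 10; i++) {
            executor.execute(slowTask);
          }
        } catch (RejectedExecutionException e) {
          // RouterRpcClient maps this case to a StandbyException
          // ("Router ... is overloaded") so clients retry elsewhere.
          System.out.println("Rejected: " + executor.getActiveCount() + "/"
              + executor.getMaximumPoolSize() + " threads busy");
        } finally {
          executor.shutdownNow();
        }
      }
    }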


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37269261
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37269261
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37269261

Branch: refs/heads/HDFS-12943
Commit: 37269261d1232bc71708f30c76193188258ef4bd
Parents: 8f42daf
Author: Yiqun Lin <yq...@apache.org>
Authored: Wed May 2 14:49:39 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Wed May 2 14:49:39 2018 +0800

----------------------------------------------------------------------
 .../federation/metrics/FederationRPCMBean.java  |   2 +
 .../metrics/FederationRPCMetrics.java           |  10 +
 .../FederationRPCPerformanceMonitor.java        |   5 +
 .../server/federation/router/RBFConfigKeys.java |   3 +
 .../federation/router/RouterRpcClient.java      |  31 ++-
 .../federation/router/RouterRpcMonitor.java     |   6 +
 .../federation/router/RouterRpcServer.java      |  11 +-
 .../router/RouterSafeModeException.java         |  53 ----
 .../src/main/resources/hdfs-rbf-default.xml     |   9 +
 .../server/federation/FederationTestUtils.java  |   2 +-
 .../server/federation/StateStoreDFSCluster.java |  28 +++
 .../router/TestRouterClientRejectOverload.java  | 243 +++++++++++++++++++
 .../router/TestRouterRPCClientRetries.java      |  51 +---
 .../federation/router/TestRouterSafemode.java   |   3 +-
 14 files changed, 349 insertions(+), 108 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
index 3e031fe..973c398 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
@@ -40,6 +40,8 @@ public interface FederationRPCMBean {
 
   long getProxyOpFailureStandby();
 
+  long getProxyOpFailureClientOverloaded();
+
   long getProxyOpNotImplemented();
 
   long getProxyOpRetries();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
index 94d3383..9ab4e5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
@@ -54,6 +54,8 @@ public class FederationRPCMetrics implements FederationRPCMBean {
   private MutableCounterLong proxyOpFailureStandby;
   @Metric("Number of operations to hit a standby NN")
   private MutableCounterLong proxyOpFailureCommunicate;
+  @Metric("Number of operations to hit a client overloaded Router")
+  private MutableCounterLong proxyOpFailureClientOverloaded;
   @Metric("Number of operations not implemented")
   private MutableCounterLong proxyOpNotImplemented;
   @Metric("Number of operation retries")
@@ -118,6 +120,14 @@ public class FederationRPCMetrics implements FederationRPCMBean {
     return proxyOpFailureCommunicate.value();
   }
 
+  public void incrProxyOpFailureClientOverloaded() {
+    proxyOpFailureClientOverloaded.incr();
+  }
+
+  @Override
+  public long getProxyOpFailureClientOverloaded() {
+    return proxyOpFailureClientOverloaded.value();
+  }
 
   public void incrProxyOpNotImplemented() {
     proxyOpNotImplemented.incr();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
index 547ebb5..2c2741e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
@@ -154,6 +154,11 @@ public class FederationRPCPerformanceMonitor implements RouterRpcMonitor {
   }
 
   @Override
+  public void proxyOpFailureClientOverloaded() {
+    metrics.incrProxyOpFailureClientOverloaded();
+  }
+
+  @Override
   public void proxyOpNotImplemented() {
     metrics.incrProxyOpNotImplemented();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
index 170b876..363db20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
@@ -113,6 +113,9 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic {
   public static final String DFS_ROUTER_CLIENT_MAX_ATTEMPTS =
       FEDERATION_ROUTER_PREFIX + "client.retry.max.attempts";
   public static final int DFS_ROUTER_CLIENT_MAX_ATTEMPTS_DEFAULT = 3;
+  public static final String DFS_ROUTER_CLIENT_REJECT_OVERLOAD =
+      FEDERATION_ROUTER_PREFIX + "client.reject.overload";
+  public static final boolean DFS_ROUTER_CLIENT_REJECT_OVERLOAD_DEFAULT = false;
 
   // HDFS Router State Store connection
   public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index 513e867..3eb7241 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -35,13 +35,16 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CancellationException;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -98,7 +101,7 @@ public class RouterRpcClient {
   /** Connection pool to the Namenodes per user for performance. */
   private final ConnectionManager connectionManager;
   /** Service to run asynchronous calls. */
-  private final ExecutorService executorService;
+  private final ThreadPoolExecutor executorService;
   /** Retry policy for router -> NN communication. */
   private final RetryPolicy retryPolicy;
   /** Optional perf monitor. */
@@ -131,8 +134,16 @@ public class RouterRpcClient {
     ThreadFactory threadFactory = new ThreadFactoryBuilder()
         .setNameFormat("RPC Router Client-%d")
         .build();
-    this.executorService = Executors.newFixedThreadPool(
-        numThreads, threadFactory);
+    BlockingQueue<Runnable> workQueue;
+    if (conf.getBoolean(
+        RBFConfigKeys.DFS_ROUTER_CLIENT_REJECT_OVERLOAD,
+        RBFConfigKeys.DFS_ROUTER_CLIENT_REJECT_OVERLOAD_DEFAULT)) {
+      workQueue = new ArrayBlockingQueue<>(numThreads);
+    } else {
+      workQueue = new LinkedBlockingQueue<>();
+    }
+    this.executorService = new ThreadPoolExecutor(numThreads, numThreads,
+        0L, TimeUnit.MILLISECONDS, workQueue, threadFactory);
 
     this.rpcMonitor = monitor;
 
@@ -1106,6 +1117,16 @@ public class RouterRpcClient {
       }
 
       return results;
+    } catch (RejectedExecutionException e) {
+      if (rpcMonitor != null) {
+        rpcMonitor.proxyOpFailureClientOverloaded();
+      }
+      int active = executorService.getActiveCount();
+      int total = executorService.getMaximumPoolSize();
+      String msg = "Not enough client threads " + active + "/" + total;
+      LOG.error(msg);
+      throw new StandbyException(
+          "Router " + routerId + " is overloaded: " + msg);
     } catch (InterruptedException ex) {
       LOG.error("Unexpected error while invoking API: {}", ex.getMessage());
       throw new IOException(
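
A minimal, self-contained sketch of the queueing behavior introduced above (the class name and values are illustrative, not part of the patch): with a bounded ArrayBlockingQueue the pool starts throwing RejectedExecutionException once every worker thread is busy and the queue is full, which RouterRpcClient then surfaces as the StandbyException shown in the hunk so HA clients can fail over to another Router.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class OverloadQueueSketch {
  public static void main(String[] args) {
    int numThreads = 2;
    boolean rejectOverload = true; // analogous to dfs.federation.router.client.reject.overload
    BlockingQueue<Runnable> workQueue = rejectOverload
        ? new ArrayBlockingQueue<Runnable>(numThreads)
        : new LinkedBlockingQueue<Runnable>();
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        numThreads, numThreads, 0L, TimeUnit.MILLISECONDS, workQueue);
    for (int i = 0; i < 10; i++) {
      try {
        pool.execute(() -> {
          try {
            Thread.sleep(1000); // simulate a slow downstream Namenode
          } catch (InterruptedException ignored) {
            Thread.currentThread().interrupt();
          }
        });
      } catch (RejectedExecutionException e) {
        // RouterRpcClient maps this case to StandbyException("Router ... is overloaded")
        System.out.println("Task " + i + " rejected: all client threads busy");
      }
    }
    pool.shutdown();
  }
}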

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java
index df9aa11..7af71af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java
@@ -76,6 +76,12 @@ public interface RouterRpcMonitor {
   void proxyOpFailureCommunicate();
 
   /**
+   * Failed to proxy an operation to a Namenode because the client was
+   * overloaded.
+   */
+  void proxyOpFailureClientOverloaded();
+
+  /**
    * Failed to proxy an operation because it is not implemented.
    */
   void proxyOpNotImplemented();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 21f26d0..6b466b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -289,7 +289,6 @@ public class RouterRpcServer extends AbstractService
     // We don't want the server to log the full stack trace for some exceptions
     this.rpcServer.addTerseExceptions(
         RemoteException.class,
-        StandbyException.class,
         SafeModeException.class,
         FileNotFoundException.class,
         FileAlreadyExistsException.class,
@@ -298,6 +297,9 @@ public class RouterRpcServer extends AbstractService
         NotReplicatedYetException.class,
         IOException.class);
 
+    this.rpcServer.addSuppressedLoggingExceptions(
+        StandbyException.class);
+
     // The RPC-server port can be ephemeral... ensure we have the correct info
     InetSocketAddress listenAddress = this.rpcServer.getListenerAddress();
     this.rpcAddress = new InetSocketAddress(
@@ -413,7 +415,7 @@ public class RouterRpcServer extends AbstractService
    * @throws UnsupportedOperationException If the operation is not supported.
    */
   protected void checkOperation(OperationCategory op, boolean supported)
-      throws RouterSafeModeException, UnsupportedOperationException {
+      throws StandbyException, UnsupportedOperationException {
     checkOperation(op);
 
     if (!supported) {
@@ -435,7 +437,7 @@ public class RouterRpcServer extends AbstractService
    *                           client requests.
    */
   protected void checkOperation(OperationCategory op)
-      throws RouterSafeModeException {
+      throws StandbyException {
     // Log the function we are currently calling.
     if (rpcMonitor != null) {
       rpcMonitor.startOp();
@@ -459,7 +461,8 @@ public class RouterRpcServer extends AbstractService
       if (rpcMonitor != null) {
         rpcMonitor.routerFailureSafemode();
       }
-      throw new RouterSafeModeException(router.getRouterId(), op);
+      throw new StandbyException("Router " + router.getRouterId() +
+          " is in safe mode and cannot handle " + op + " requests");
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafeModeException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafeModeException.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafeModeException.java
deleted file mode 100644
index 7a78b5b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafeModeException.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.router;
-
-import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
-import org.apache.hadoop.ipc.StandbyException;
-
-/**
- * Exception that the Router throws when it is in safe mode. This extends
- * {@link StandbyException} for the client to try another Router when it gets
- * this exception.
- */
-public class RouterSafeModeException extends StandbyException {
-
-  private static final long serialVersionUID = 453568188334993493L;
-
-  /** Identifier of the Router that generated this exception. */
-  private final String routerId;
-
-  /**
-   * Build a new Router safe mode exception.
-   * @param router Identifier of the Router.
-   * @param op Category of the operation (READ/WRITE).
-   */
-  public RouterSafeModeException(String router, OperationCategory op) {
-    super("Router " + router + " is in safe mode and cannot handle " + op
-        + " requests.");
-    this.routerId = router;
-  }
-
-  /**
-   * Get the id of the Router that generated this exception.
-   * @return Id of the Router that generated this exception.
-   */
-  public String getRouterId() {
-    return this.routerId;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
index 92f899d..8806cb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
@@ -431,4 +431,13 @@
     </description>
   </property>
 
+  <property>
+    <name>dfs.federation.router.client.reject.overload</name>
+    <value>false</value>
+    <description>
+      Set to true to reject client requests when we run out of RPC client
+      threads.
+    </description>
+  </property>
+
 </configuration>
\ No newline at end of file
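
For completeness, a hedged sketch of enabling the new key programmatically when assembling a Router configuration; only the key name comes from the patch, the surrounding class is illustrative.

import org.apache.hadoop.conf.Configuration;

public class EnableOverloadRejection {
  public static void main(String[] args) {
    Configuration routerConf = new Configuration();
    // Fail fast with a StandbyException instead of queueing proxied calls
    // once the Router's RPC client thread pool is fully occupied.
    routerConf.setBoolean("dfs.federation.router.client.reject.overload", true);
    System.out.println(routerConf.getBoolean(
        "dfs.federation.router.client.reject.overload", false));
  }
}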

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
index ed1428a..ce320f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
@@ -59,7 +59,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.Whitebox;
+import org.mockito.internal.util.reflection.Whitebox;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 import org.slf4j.Logger;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/StateStoreDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/StateStoreDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/StateStoreDFSCluster.java
index bf63b18..9d56f13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/StateStoreDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/StateStoreDFSCluster.java
@@ -28,6 +28,10 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;
 import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
@@ -37,6 +41,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
 import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * Test utility to mimic a federated HDFS cluster with a router and a state
@@ -145,4 +150,27 @@ public class StateStoreDFSCluster extends MiniRouterDFSCluster {
     entries.add(entry);
     return entries;
   }
+
+  /**
+   * Get the client configuration which targets all the Routers. It uses the HA
+   * setup to fail over between them.
+   * @return Configuration for the client which uses two routers.
+   */
+  public Configuration getRouterClientConf() {
+    List<RouterContext> routers = getRouters();
+    Configuration clientConf = DFSTestUtil.newHAConfiguration("fed");
+    int i = 0;
+    List<String> names = new ArrayList<>(routers.size());
+    for (RouterContext routerContext : routers) {
+      String name = "r" + i++;
+      clientConf.set(
+          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".fed." + name,
+          "localhost:" + routerContext.getRpcPort());
+      names.add(name);
+    }
+    clientConf.set(DFSUtil.addKeySuffixes(
+        HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, "fed"),
+        StringUtils.join(",", names));
+    return clientConf;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java
new file mode 100644
index 0000000..3c51e13
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java
@@ -0,0 +1,243 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.simulateSlowNamenode;
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCMetrics;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
+import org.junit.After;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * Test the Router overload control which rejects requests when the RPC client
+ * is overloaded. This feature is managed by
+ * {@link RBFConfigKeys#DFS_ROUTER_CLIENT_REJECT_OVERLOAD}.
+ */
+public class TestRouterClientRejectOverload {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRouterClientRejectOverload.class);
+
+  private StateStoreDFSCluster cluster;
+
+  @After
+  public void cleanup() {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  private void setupCluster(boolean overloadControl) throws Exception {
+    // Build and start a federated cluster
+    cluster = new StateStoreDFSCluster(false, 2);
+    Configuration routerConf = new RouterConfigBuilder()
+        .stateStore()
+        .metrics()
+        .admin()
+        .rpc()
+        .build();
+
+    // Reduce the number of RPC client threads to overload the Router easily
+    routerConf.setInt(RBFConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE, 4);
+    // Overload control
+    routerConf.setBoolean(
+        RBFConfigKeys.DFS_ROUTER_CLIENT_REJECT_OVERLOAD, overloadControl);
+
+    // No need for datanodes as we use renewLease() for testing
+    cluster.setNumDatanodesPerNameservice(0);
+
+    cluster.addRouterOverrides(routerConf);
+    cluster.startCluster();
+    cluster.startRouters();
+    cluster.waitClusterUp();
+  }
+
+  @Test
+  public void testWithoutOverloadControl() throws Exception {
+    setupCluster(false);
+
+    // Nobody should get overloaded
+    testOverloaded(0);
+
+    // Set subcluster 0 as slow
+    MiniDFSCluster dfsCluster = cluster.getCluster();
+    NameNode nn0 = dfsCluster.getNameNode(0);
+    simulateSlowNamenode(nn0, 1);
+
+    // Nobody should get overloaded, but it will be really slow
+    testOverloaded(0);
+
+    // No rejected requests expected
+    for (RouterContext router : cluster.getRouters()) {
+      FederationRPCMetrics rpcMetrics =
+          router.getRouter().getRpcServer().getRPCMetrics();
+      assertEquals(0, rpcMetrics.getProxyOpFailureClientOverloaded());
+    }
+  }
+
+  @Test
+  public void testOverloadControl() throws Exception {
+    setupCluster(true);
+
+    List<RouterContext> routers = cluster.getRouters();
+    FederationRPCMetrics rpcMetrics0 =
+        routers.get(0).getRouter().getRpcServer().getRPCMetrics();
+    FederationRPCMetrics rpcMetrics1 =
+        routers.get(1).getRouter().getRpcServer().getRPCMetrics();
+
+    // Nobody should get overloaded
+    testOverloaded(0);
+    assertEquals(0, rpcMetrics0.getProxyOpFailureClientOverloaded());
+    assertEquals(0, rpcMetrics1.getProxyOpFailureClientOverloaded());
+
+    // Set subcluster 0 as slow
+    MiniDFSCluster dfsCluster = cluster.getCluster();
+    NameNode nn0 = dfsCluster.getNameNode(0);
+    simulateSlowNamenode(nn0, 1);
+
+    // The subcluster should be overloaded now and reject 4-5 requests
+    testOverloaded(4, 6);
+    assertTrue(rpcMetrics0.getProxyOpFailureClientOverloaded()
+        + rpcMetrics1.getProxyOpFailureClientOverloaded() >= 4);
+
+    // Client using HA with 2 Routers
+    // A single Router gets overloaded, but 2 will handle it
+    Configuration clientConf = cluster.getRouterClientConf();
+
+    // Each Router should get a similar number of ops (>=8) out of 2*10
+    long iniProxyOps0 = rpcMetrics0.getProxyOps();
+    long iniProxyOps1 = rpcMetrics1.getProxyOps();
+    testOverloaded(0, 0, new URI("hdfs://fed/"), clientConf, 10);
+    long proxyOps0 = rpcMetrics0.getProxyOps() - iniProxyOps0;
+    long proxyOps1 = rpcMetrics1.getProxyOps() - iniProxyOps1;
+    assertEquals(2 * 10, proxyOps0 + proxyOps1);
+    assertTrue(proxyOps0 + " operations: not distributed", proxyOps0 >= 8);
+    assertTrue(proxyOps1 + " operations: not distributed", proxyOps1 >= 8);
+  }
+
+  private void testOverloaded(int expOverload) throws Exception {
+    testOverloaded(expOverload, expOverload);
+  }
+
+  private void testOverloaded(int expOverloadMin, int expOverloadMax)
+      throws Exception {
+    RouterContext routerContext = cluster.getRandomRouter();
+    URI address = routerContext.getFileSystemURI();
+    Configuration conf = new HdfsConfiguration();
+    testOverloaded(expOverloadMin, expOverloadMax, address, conf, 10);
+  }
+
+  /**
+   * Test if the Router gets overloaded by submitting requests in parallel.
+   * We check how many requests got rejected at the end.
+   * @param expOverloadMin Min number of requests expected as overloaded.
+   * @param expOverloadMax Max number of requests expected as overloaded.
+   * @param address Destination address.
+   * @param conf Configuration of the client.
+   * @param numOps Number of operations to submit.
+   * @throws Exception If it cannot perform the test.
+   */
+  private void testOverloaded(int expOverloadMin, int expOverloadMax,
+      final URI address, final Configuration conf, final int numOps)
+          throws Exception {
+
+    // Submit renewLease() ops which go to all subclusters
+    final AtomicInteger overloadException = new AtomicInteger();
+    ExecutorService exec = Executors.newFixedThreadPool(numOps);
+    List<Future<?>> futures = new ArrayList<>();
+    for (int i = 0; i < numOps; i++) {
+      // Stagger the operations a little (50ms)
+      final int sleepTime = i * 50;
+      Future<?> future = exec.submit(new Runnable() {
+        @Override
+        public void run() {
+          DFSClient routerClient = null;
+          try {
+            Thread.sleep(sleepTime);
+            routerClient = new DFSClient(address, conf);
+            String clientName = routerClient.getClientName();
+            ClientProtocol routerProto = routerClient.getNamenode();
+            routerProto.renewLease(clientName);
+          } catch (RemoteException re) {
+            IOException ioe = re.unwrapRemoteException();
+            assertTrue("Wrong exception: " + ioe,
+                ioe instanceof StandbyException);
+            assertExceptionContains("is overloaded", ioe);
+            overloadException.incrementAndGet();
+          } catch (IOException e) {
+            fail("Unexpected exception: " + e);
+          } catch (InterruptedException e) {
+            fail("Cannot sleep: " + e);
+          } finally {
+            if (routerClient != null) {
+              try {
+                routerClient.close();
+              } catch (IOException e) {
+                LOG.error("Cannot close the client");
+              }
+            }
+          }
+        }
+      });
+      futures.add(future);
+    }
+    // Wait until all the requests are done
+    while (!futures.isEmpty()) {
+      futures.remove(0).get();
+    }
+    exec.shutdown();
+
+    int num = overloadException.get();
+    if (expOverloadMin == expOverloadMax) {
+      assertEquals(expOverloadMin, num);
+    } else {
+      assertTrue("Expected >=" + expOverloadMin + " but was " + num,
+          num >= expOverloadMin);
+      assertTrue("Expected <=" + expOverloadMax + " but was " + num,
+          num <= expOverloadMax);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
index 372dd3b..e5ab3ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.simulateSlowNamenode;
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.apache.hadoop.test.GenericTestUtils.waitFor;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
 import java.util.List;
@@ -44,13 +44,8 @@ import org.apache.hadoop.hdfs.server.federation.metrics.NamenodeBeanMetrics;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
 import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
-import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.Whitebox;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.After;
@@ -58,10 +53,6 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Supplier;
 
@@ -70,9 +61,6 @@ import com.google.common.base.Supplier;
  */
 public class TestRouterRPCClientRetries {
 
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestRouterRPCClientRetries.class);
-
   private static StateStoreDFSCluster cluster;
   private static NamenodeContext nnContext1;
   private static RouterContext routerContext;
@@ -144,7 +132,7 @@ public class TestRouterRPCClientRetries {
       fail("Should have thrown RemoteException error.");
     } catch (RemoteException e) {
       String ns0 = cluster.getNameservices().get(0);
-      GenericTestUtils.assertExceptionContains(
+      assertExceptionContains(
           "No namenode available under nameservice " + ns0, e);
     }
 
@@ -212,14 +200,14 @@ public class TestRouterRPCClientRetries {
     // Making subcluster0 slow to reply, should only get DNs from nn1
     MiniDFSCluster dfsCluster = cluster.getCluster();
     NameNode nn0 = dfsCluster.getNameNode(0);
-    simulateNNSlow(nn0);
+    simulateSlowNamenode(nn0, 3);
     waitUpdateLiveNodes(jsonString2, metrics);
     final String jsonString3 = metrics.getLiveNodes();
     assertEquals(2, getNumDatanodes(jsonString3));
 
     // Making subcluster1 slow to reply, shouldn't get any DNs
     NameNode nn1 = dfsCluster.getNameNode(1);
-    simulateNNSlow(nn1);
+    simulateSlowNamenode(nn1, 3);
     waitUpdateLiveNodes(jsonString3, metrics);
     final String jsonString4 = metrics.getLiveNodes();
     assertEquals(0, getNumDatanodes(jsonString4));
@@ -249,36 +237,11 @@ public class TestRouterRPCClientRetries {
   private static void waitUpdateLiveNodes(
       final String oldValue, final NamenodeBeanMetrics metrics)
           throws Exception {
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+    waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {
         return !oldValue.equals(metrics.getLiveNodes());
       }
     }, 500, 5 * 1000);
   }
-
-  /**
-   * Simulate that a Namenode is slow by adding a sleep to the check operation
-   * in the NN.
-   * @param nn Namenode to simulate slow.
-   * @throws Exception If we cannot add the sleep time.
-   */
-  private static void simulateNNSlow(final NameNode nn) throws Exception {
-    FSNamesystem namesystem = nn.getNamesystem();
-    HAContext haContext = namesystem.getHAContext();
-    HAContext spyHAContext = spy(haContext);
-    doAnswer(new Answer<Object>() {
-      @Override
-      public Object answer(InvocationOnMock invocation) throws Throwable {
-        LOG.info("Simulating slow namenode {}", invocation.getMock());
-        try {
-          Thread.sleep(3 * 1000);
-        } catch(InterruptedException e) {
-          LOG.error("Simulating a slow namenode aborted");
-        }
-        return null;
-      }
-    }).when(spyHAContext).checkOperation(any(OperationCategory.class));
-    Whitebox.setInternalState(namesystem, "haContext", spyHAContext);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
index e5d8348..f16ceb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
@@ -33,6 +33,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -187,7 +188,7 @@ public class TestRouterSafemode {
     try {
       router.getRpcServer().delete("/testfile.txt", true);
       fail("We should have thrown a safe mode exception");
-    } catch (RouterSafeModeException sme) {
+    } catch (StandbyException sme) {
       exception = true;
     }
     assertTrue("We should have thrown a safe mode exception", exception);




[28/50] [abbrv] hadoop git commit: HADOOP-15406. hadoop-nfs dependencies for mockito and junit are not test scope

Posted by xk...@apache.org.
HADOOP-15406. hadoop-nfs dependencies for mockito and junit are not test scope

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e07156e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e07156e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e07156e8

Branch: refs/heads/HDFS-12943
Commit: e07156e8b07552b877a22565641465e211144f6f
Parents: 3376872
Author: Jason Lowe <jl...@oath.com>
Authored: Wed May 2 17:30:10 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed May 2 17:30:10 2018 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-nfs/pom.xml | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e07156e8/hadoop-common-project/hadoop-nfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml
index 8546112..80d8cd2 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -56,10 +56,12 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>commons-logging</groupId>




[47/50] [abbrv] hadoop git commit: HADOOP-15444 ITestS3GuardToolDynamo should only run with -Ddynamo (Aaron Fabbri)

Posted by xk...@apache.org.
HADOOP-15444 ITestS3GuardToolDynamo should only run with -Ddynamo (Aaron Fabbri)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96c843f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96c843f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96c843f6

Branch: refs/heads/HDFS-12943
Commit: 96c843f64bb424cd7544be0ccda16a6755c086de
Parents: 8cdb032
Author: Aaron Fabbri <fa...@apache.org>
Authored: Fri May 4 11:34:37 2018 -0700
Committer: Aaron Fabbri <fa...@apache.org>
Committed: Fri May 4 11:34:45 2018 -0700

----------------------------------------------------------------------
 .../hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java     | 9 +++++++++
 1 file changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96c843f6/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
index c7dffd2..821bba5 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
@@ -28,6 +28,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import com.amazonaws.services.dynamodbv2.document.DynamoDB;
 import com.amazonaws.services.dynamodbv2.document.Table;
 import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException;
+import org.junit.Assume;
 import org.junit.Test;
 
 import org.apache.hadoop.conf.Configuration;
@@ -51,6 +52,14 @@ public class ITestS3GuardToolDynamoDB extends AbstractS3GuardToolTestBase {
     return new DynamoDBMetadataStore();
   }
 
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    Assume.assumeTrue("Test only applies when DynamoDB is used for S3Guard",
+        getConfiguration().get(Constants.S3_METADATA_STORE_IMPL).equals(
+            Constants.S3GUARD_METASTORE_DYNAMO));
+  }
+
   // Check the existence of a given DynamoDB table.
   private static boolean exist(DynamoDB dynamoDB, String tableName) {
     assertNotNull(dynamoDB);




[03/50] [abbrv] hadoop git commit: HADOOP-15382. Log kinit output in credential renewal thread. Contributed by Gabor Bota.

Posted by xk...@apache.org.
HADOOP-15382. Log kinit output in credential renewal thread. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bff3d7b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bff3d7b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bff3d7b0

Branch: refs/heads/HDFS-12943
Commit: bff3d7b0cf073ccc061db30af6d52fa4a9f21c05
Parents: 24a5ccb
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Fri Apr 27 10:05:55 2018 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Fri Apr 27 10:05:55 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/security/UserGroupInformation.java    | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bff3d7b0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index a9f6cb6..cb132b3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -866,9 +866,9 @@ public class UserGroupInformation {
             if (now < nextRefresh) {
               Thread.sleep(nextRefresh - now);
             }
-            Shell.execCommand(cmd, "-R");
+            String output = Shell.execCommand(cmd, "-R");
             if (LOG.isDebugEnabled()) {
-              LOG.debug("renewed ticket");
+              LOG.debug("Renewed ticket. kinit output: {}", output);
             }
             reloginFromTicketCache();
             tgt = getTGT();




[34/50] [abbrv] hadoop git commit: YARN-8113. Update placement constraints doc with application namespaces and inter-app constraints. Contributed by Weiwei Yang.

Posted by xk...@apache.org.
YARN-8113. Update placement constraints doc with application namespaces and inter-app constraints. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b34fca4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b34fca4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b34fca4

Branch: refs/heads/HDFS-12943
Commit: 3b34fca4b5d67a2685852f30bb61e7c408a0e886
Parents: 883f682
Author: Konstantinos Karanasos <kk...@apache.org>
Authored: Wed May 2 11:48:35 2018 -0700
Committer: Konstantinos Karanasos <kk...@apache.org>
Committed: Wed May 2 11:49:56 2018 -0700

----------------------------------------------------------------------
 .../site/markdown/PlacementConstraints.md.vm    | 67 +++++++++++++++-----
 1 file changed, 52 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34fca4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
index cb34c3f..4ac1683 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
@@ -28,7 +28,7 @@ YARN allows applications to specify placement constraints in the form of data lo
 
 For example, it may be beneficial to co-locate the allocations of a job on the same rack (*affinity* constraints) to reduce network costs, spread allocations across machines (*anti-affinity* constraints) to minimize resource interference, or allow up to a specific number of allocations in a node group (*cardinality* constraints) to strike a balance between the two. Placement decisions also affect resilience. For example, allocations placed within the same cluster upgrade domain would go offline simultaneously.
 
-The applications can specify constraints without requiring knowledge of the underlying topology of the cluster (e.g., one does not need to specify the specific node or rack where their containers should be placed with constraints) or the other applications deployed. Currently **intra-application** constraints are supported, but the design that is followed is generic and support for constraints across applications will soon be added. Moreover, all constraints at the moment are **hard**, that is, if the constraints for a container cannot be satisfied due to the current cluster condition or conflicting constraints, the container request will remain pending or get will get rejected.
+The applications can specify constraints without requiring knowledge of the underlying topology of the cluster (e.g., one does not need to specify the specific node or rack where their containers should be placed with constraints) or the other applications deployed. Currently, all constraints are **hard**, that is, if a constraint for a container cannot be satisfied due to the current cluster condition or conflicting constraints, the container request will remain pending or get rejected.
 
 Note that in this document we use the notion of “allocation” to refer to a unit of resources (e.g., CPU and memory) that gets allocated in a node. In the current implementation of YARN, an allocation corresponds to a single container. However, in case an application uses an allocation to spawn more than one containers, an allocation could correspond to multiple containers.
 
@@ -65,15 +65,19 @@ $ yarn org.apache.hadoop.yarn.applications.distributedshell.Client -jar share/ha
 where **PlacementSpec** is of the form:
 
 ```
-PlacementSpec => "" | KeyVal;PlacementSpec
-KeyVal        => SourceTag=Constraint
-SourceTag     => String
-Constraint    => NumContainers | NumContainers,"IN",Scope,TargetTag | NumContainers,"NOTIN",Scope,TargetTag | NumContainers,"CARDINALITY",Scope,TargetTag,MinCard,MaxCard
-NumContainers => int
-Scope         => "NODE" | "RACK"
-TargetTag     => String
-MinCard       => int
-MaxCard       => int
+PlacementSpec         => "" | KeyVal;PlacementSpec
+KeyVal                => SourceTag=ConstraintExpr
+SourceTag             => String
+ConstraintExpr        => NumContainers | NumContainers, Constraint
+Constraint            => SingleConstraint | CompositeConstraint
+SingleConstraint      => "IN",Scope,TargetTag | "NOTIN",Scope,TargetTag | "CARDINALITY",Scope,TargetTag,MinCard,MaxCard
+CompositeConstraint   => AND(ConstraintList) | OR(ConstraintList)
+ConstraintList        => Constraint | Constraint:ConstraintList
+NumContainers         => int
+Scope                 => "NODE" | "RACK"
+TargetTag             => String
+MinCard               => int
+MaxCard               => int
 ```
 
 Note that when the `-placement_spec` argument is specified in the distributed shell command, the `-num-containers` argument should not be used. In case `-num-containers` argument is used in conjunction with `-placement-spec`, the former is ignored. This is because in PlacementSpec, we determine the number of containers per tag, making the `-num-containers` redundant and possibly conflicting. Moreover, if `-placement_spec` is used, all containers will be requested with GUARANTEED execution type.
@@ -82,11 +86,18 @@ An example of PlacementSpec is the following:
 ```
 zk=3,NOTIN,NODE,zk:hbase=5,IN,RACK,zk:spark=7,CARDINALITY,NODE,hbase,1,3
 ```
-The above encodes two constraints:
+The above encodes three constraints:
 * place 3 containers with tag "zk" (standing for ZooKeeper) with node anti-affinity to each other, i.e., do not place more than one container per node (notice that in this first constraint, the SourceTag and the TargetTag of the constraint coincide);
 * place 5 containers with tag "hbase" with affinity to a rack on which containers with tag "zk" are running (i.e., an "hbase" container should be placed at a rack where a "zk" container is running, given that "zk" is the TargetTag of the second constraint);
-* place 7 container with tag "spark" in nodes that have at least one, but no more than three, containers, with tag "hbase".
+* place 7 containers with tag "spark" in nodes that have at least one, but no more than three, containers with tag "hbase".
 
+Another example below demonstrates a composite form of constraint:
+```
+zk=5,AND(IN,RACK,hbase:NOTIN,NODE,zk)
+```
+The above constraint uses the conjunction operator `AND` to combine two constraints. The AND constraint is satisfied when both of its child constraints are satisfied. This PlacementSpec requests placing 5 "zk" containers in a rack where at least one "hbase" container is running, and on a node on which no "zk" container is running.
+Similarly, an `OR` operator can be used to define a constraint that is satisfied when at least one of its children constraints is satisfied.
+Note that in case "zk" and "hbase" are containers belonging to different applications (which is most probably the case in real use cases), the allocation tags in the PlacementSpec should include namespaces, as we describe below (see [Allocation tags namespace](#Allocation_tags_namespace)).
 
 
 Defining Placement Constraints
@@ -98,11 +109,37 @@ Allocation tags are string tags that an application can associate with (groups o
 
 Note that instead of using the `ResourceRequest` object to define allocation tags, we use the new `SchedulingRequest` object. This has many similarities with the `ResourceRequest`, but better separates the sizing of the requested allocations (number and size of allocations, priority, execution type, etc.), and the constraints dictating how these allocations should be placed (resource name, relaxed locality). Applications can still use `ResourceRequest` objects, but in order to define allocation tags and constraints, they need to use the `SchedulingRequest` object. Within a single `AllocateRequest`, an application should use either the `ResourceRequest` or the `SchedulingRequest` objects, but not both of them.
 
+$H4 Allocation tags namespace
+
+Allocation tags might refer to containers of the same or different applications, and are used to express intra- or inter-application constraints, respectively.
+We use allocation tag namespaces in order to specify the scope of applications that an allocation tag can refer to. By coupling an allocation tag with a namespace, we can restrict whether the tag targets containers that belong to the same application, to a certain group of applications, or to any application in the cluster.
+
+We currently support the following namespaces:
+
+| Namespace | Syntax | Description |
+|:--------- |:-------|:------------|
+| SELF | `self/${allocationTag}` | The allocation tag refers to containers of the current application (to which the constraint will be applied). This is the default namespace. |
+| NOT_SELF | `not-self/${allocationTag}` | The allocation tag refers only to containers that do not belong to the current application. |
+| ALL | `all/${allocationTag}` | The allocation tag refers to containers of any application. |
+| APP_ID | `app-id/${applicationID}/${allocationTag}` | The allocation tag refers to containers of the application with the specified application ID. |
+| APP_TAG | `app-tag/application_tag_name/${allocationTag}` | The allocation tag refers to containers of applications that are tagged with the specified application tag. |
+
+
+To attach an allocation tag namespace `ns` to a target tag `targetTag`, we use the syntax `ns/allocationTag` in the PlacementSpec. Note that the default namespace is `SELF`, which is used for **intra-app** constraints. The remaining namespace tags are used to specify **inter-app** constraints. When the namespace is not specified next to a tag, `SELF` is assumed.
+
+The example constraints used above could be extended with namespaces as follows:
+```
+zk=3,NOTIN,NODE,not-self/zk:hbase=5,IN,RACK,all/zk:spark=7,CARDINALITY,NODE,app-id/appID_0023/hbase,1,3
+```
+The semantics of these constraints are the following:
+* place 3 containers with tag "zk" (standing for ZooKeeper) on nodes that do not have "zk" containers from other applications running;
+* place 5 containers with tag "hbase" with affinity to a rack on which containers with tag "zk" (from any application, be it the same or a different one) are running;
+* place 7 containers with tag "spark" in nodes that have at least one, but no more than three, containers with tag "hbase" belonging to application with ID `appID_0023`.
+
 $H4 Differences between node labels, node attributes and allocation tags
 
 The difference between allocation tags and node labels or node attributes (YARN-3409), is that allocation tags are attached to allocations and not to nodes. When an allocation gets allocated to a node by the scheduler, the set of tags of that allocation are automatically added to the node for the duration of the allocation. Hence, a node inherits the tags of the allocations that are currently allocated to the node. Likewise, a rack inherits the tags of its nodes. Moreover, similar to node labels and unlike node attributes, allocation tags have no value attached to them. As we show below, our constraints can refer to allocation tags, as well as node labels and node attributes.
 
-
 $H3 Placement constraints API
 
 Applications can use the public API in the `PlacementConstraints` to construct placement constraint. Before describing the methods for building constraints, we describe the methods of the `PlacementTargets` class that are used to construct the target expressions that will then be used in constraints:
@@ -110,7 +147,7 @@ Applications can use the public API in the `PlacementConstraints` to construct p
 | Method | Description |
 |:------ |:----------- |
 | `allocationTag(String... allocationTags)` | Constructs a target expression on an allocation tag. It is satisfied if there are allocations with one of the given tags. |
-| `allocationTagToIntraApp(String... allocationTags)` | similar to `allocationTag(String...)`, but targeting only the containers of the application that will use this target (intra-application constraints). |
+| `allocationTagWithNamespace(String namespace, String... allocationTags)` | Similar to `allocationTag(String...)`, but allows specifying a namespace for the given allocation tags. |
 | `nodePartition(String... nodePartitions)` | Constructs a target expression on a node partition. It is satisfied for nodes that belong to one of the `nodePartitions`. |
 | `nodeAttribute(String attributeKey, String... attributeValues)` | Constructs a target expression on a node attribute. It is satisfied if the specified node attribute has one of the specified values. |
 
@@ -136,4 +173,4 @@ Applications have to specify the containers for which each constraint will be en
 
 When using the `placement-processor` handler (see [Enabling placement constraints](#Enabling_placement_constraints)), this constraint mapping is specified within the `RegisterApplicationMasterRequest`.
 
-When using the `scheduler` handler, the constraints can also be added at each `SchedulingRequest` object. Each such constraint is valid for the tag of that scheduling request. In case constraints are specified both at the `RegisterApplicationMasterRequest` and the scheduling requests, the latter override the former.
+When using the `scheduler` handler, the constraints can also be added at each `SchedulingRequest` object. Each such constraint is valid for the tag of that scheduling request. In case constraints are specified both at the `RegisterApplicationMasterRequest` and the scheduling requests, the latter override the former.
\ No newline at end of file
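
As a quick illustration of the API table above, a sketch that builds the intra-app anti-affinity constraint from the first PlacementSpec example ("zk=3,NOTIN,NODE,zk") programmatically; targetNotIn, NODE and build are assumed to be the static members of the PlacementConstraints class referenced in the document, so treat the exact signatures as illustrative.

import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

public class ZkAntiAffinitySketch {
  public static void main(String[] args) {
    // NOTIN,NODE,zk: do not place a "zk" container on a node that already
    // hosts a container tagged "zk" (default namespace SELF, i.e. intra-app).
    PlacementConstraint zkAntiAffinity =
        PlacementConstraints.build(targetNotIn(NODE, allocationTag("zk")));
    System.out.println(zkAntiAffinity);
  }
}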




[24/50] [abbrv] hadoop git commit: HADOOP-15250. Split-DNS MultiHomed Server Network Cluster Network IPC Client Bind Addr Wrong Contributed by Ajay Kumar

Posted by xk...@apache.org.
HADOOP-15250. Split-DNS MultiHomed Server Network Cluster Network IPC Client Bind Addr Wrong
Contributed by Ajay Kumar


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f42dafc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f42dafc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f42dafc

Branch: refs/heads/HDFS-12943
Commit: 8f42dafcf82d5b426dd931dc5ddd177dd6f283f7
Parents: 68c6ec7
Author: Steve Loughran <st...@apache.org>
Authored: Tue May 1 22:32:40 2018 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Tue May 1 22:32:40 2018 +0100

----------------------------------------------------------------------
 .../apache/hadoop/fs/CommonConfigurationKeys.java   |  4 ++++
 .../src/main/java/org/apache/hadoop/ipc/Client.java | 16 ++++++++++++----
 .../main/java/org/apache/hadoop/net/NetUtils.java   | 16 ++++++++++++++++
 .../src/main/resources/core-default.xml             |  8 ++++++++
 .../java/org/apache/hadoop/net/TestNetUtils.java    |  8 ++++++++
 5 files changed, 48 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f42dafc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 043e52a..1eb27f8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -341,6 +341,10 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String  IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY = "ipc.client.fallback-to-simple-auth-allowed";
   public static final boolean IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT = false;
 
+  public static final String  IPC_CLIENT_BIND_WILDCARD_ADDR_KEY = "ipc.client"
+      + ".bind.wildcard.addr";
+  public static final boolean IPC_CLIENT_BIND_WILDCARD_ADDR_DEFAULT = false;
+
   public static final String IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY =
     "ipc.client.connect.max.retries.on.sasl";
   public static final int    IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT = 5;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f42dafc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index a0417d6..163e80d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -135,6 +135,7 @@ public class Client implements AutoCloseable {
   private final int connectionTimeout;
 
   private final boolean fallbackAllowed;
+  private final boolean bindToWildCardAddress;
   private final byte[] clientId;
   private final int maxAsyncCalls;
   private final AtomicInteger asyncCallCounter = new AtomicInteger(0);
@@ -674,10 +675,10 @@ public class Client implements AutoCloseable {
               InetAddress localAddr = NetUtils.getLocalInetAddress(host);
               if (localAddr != null) {
                 this.socket.setReuseAddress(true);
-                if (LOG.isDebugEnabled()) {
-                  LOG.debug("Binding " + principal + " to " + localAddr);
-                }
-                bindAddr = new InetSocketAddress(localAddr, 0);
+                localAddr = NetUtils.bindToLocalAddress(localAddr,
+                    bindToWildCardAddress);
+                LOG.debug("Binding {} to {}", principal, localAddr);
+                this.socket.bind(new InetSocketAddress(localAddr, 0));
               }
             }
           }
@@ -1277,6 +1278,13 @@ public class Client implements AutoCloseable {
         CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT);
     this.fallbackAllowed = conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
         CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
+    this.bindToWildCardAddress = conf
+        .getBoolean(CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY,
+            CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_DEFAULT);
+    LOG.debug("{} set to true. Will bind client sockets to wildcard "
+            + "address.",
+        CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY);
+
     this.clientId = ClientId.getClientId();
     this.sendParamsExecutor = clientExcecutorFactory.refAndGetInstance();
     this.maxAsyncCalls = conf.getInt(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f42dafc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index e16c2a3..0f9cfc3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -952,4 +952,20 @@ public class NetUtils {
     }
     return port;
   }
+
+  /**
+   * Return an {@link InetAddress} to bind to. If bindWildCardAddress is true
+   * then null is returned, which callers treat as the wildcard address.
+   *
+   * @param localAddr the local address to bind to when not using the wildcard
+   * @param bindWildCardAddress whether to bind to the wildcard address
+   * @return the InetAddress to bind to, or null for the wildcard address
+   */
+  public static InetAddress bindToLocalAddress(InetAddress localAddr, boolean
+      bindWildCardAddress) {
+    if (!bindWildCardAddress) {
+      return localAddr;
+    }
+    return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f42dafc/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index fd72618..3a00131 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -3051,4 +3051,12 @@
       System tags to group related properties together.
     </description>
   </property>
+
+  <property>
+    <name>ipc.client.bind.wildcard.addr</name>
+    <value>false</value>
+    <description>When set to true, clients will bind the client socket to the
+      wildcard address (i.e. 0.0.0.0).
+    </description>
+  </property>
 </configuration>
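
For reference, a minimal, illustrative sketch (not part of the patch) of how an
application might enable the new key and what the helper added above returns in
each mode. The class name WildcardBindExample is made up for the example; the
configuration key and NetUtils methods are the ones introduced or used by this
change.

  import java.net.InetAddress;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.CommonConfigurationKeys;
  import org.apache.hadoop.net.NetUtils;

  public class WildcardBindExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Opt in to binding IPC client sockets to the wildcard address (0.0.0.0).
      conf.setBoolean(
          CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY, true);

      InetAddress local = NetUtils.getLocalInetAddress("127.0.0.1");
      // With the flag off, the concrete local address is used for the bind.
      System.out.println(NetUtils.bindToLocalAddress(local, false)); // loopback
      // With the flag on, null is returned, which the IPC Client treats as
      // "bind to the wildcard address".
      System.out.println(NetUtils.bindToLocalAddress(local, true));  // null
    }
  }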

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f42dafc/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
index b463c95..30176f2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
@@ -707,6 +707,14 @@ public class TestNetUtils {
     assertEquals(defaultAddr.trim(), NetUtils.getHostPortString(addr));
   }
 
+  @Test
+  public void testBindToLocalAddress() throws Exception {
+    assertNotNull(NetUtils
+        .bindToLocalAddress(NetUtils.getLocalInetAddress("127.0.0.1"), false));
+    assertNull(NetUtils
+        .bindToLocalAddress(NetUtils.getLocalInetAddress("127.0.0.1"), true));
+  }
+
   private <T> void assertBetterArrayEquals(T[] expect, T[]got) {
     String expectStr = StringUtils.join(expect, ", ");
     String gotStr = StringUtils.join(got, ", ");




[21/50] [abbrv] hadoop git commit: HDFS-13503. Fix TestFsck test failures on Windows. Contributed by Xiao Liang.

Posted by xk...@apache.org.
HDFS-13503. Fix TestFsck test failures on Windows. Contributed by Xiao Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e2cfb2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e2cfb2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e2cfb2d

Branch: refs/heads/HDFS-12943
Commit: 9e2cfb2d3f1a18984d07c81f9c46626dd842402a
Parents: 4e1382a
Author: Inigo Goiri <in...@apache.org>
Authored: Tue May 1 08:12:46 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue May 1 08:12:46 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   3 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 123 ++++++++++++-------
 2 files changed, 81 insertions(+), 45 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e2cfb2d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index acb720e..c2e2a68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -2924,7 +2924,8 @@ public class MiniDFSCluster implements AutoCloseable {
    * @return Storage directory
    */
   public File getStorageDir(int dnIndex, int dirIndex) {
-    return new File(getBaseDirectory(), getStorageDirPath(dnIndex, dirIndex));
+    return new File(determineDfsBaseDir(),
+        getStorageDirPath(dnIndex, dirIndex));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e2cfb2d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index f80fd70..1a392da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.MiniDFSCluster.HDFS_MINIDFS_BASEDIR;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -209,7 +210,9 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
         precision);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     fs = cluster.getFileSystem();
     final String fileName = "/srcdat";
     util.createFiles(fs, fileName);
@@ -297,7 +300,9 @@ public class TestFsck {
         setNumFiles(20).build();
     FileSystem fs = null;
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     fs = cluster.getFileSystem();
     util.createFiles(fs, "/srcdat");
     util.waitReplication(fs, "/srcdat", (short)3);
@@ -315,7 +320,9 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
 
     // Create a cluster with the current user, write some files
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     final MiniDFSCluster c2 = cluster;
     final String dir = "/dfsck";
     final Path dirpath = new Path(dir);
@@ -361,8 +368,9 @@ public class TestFsck {
     DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3,
         (5 * dfsBlockSize) + (dfsBlockSize - 1), 5 * dfsBlockSize);
     FileSystem fs = null;
-    cluster = new MiniDFSCluster.Builder(conf).
-        numDataNodes(numDatanodes).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDatanodes).build();
     String topDir = "/srcdat";
     fs = cluster.getFileSystem();
     cluster.waitActive();
@@ -568,7 +576,9 @@ public class TestFsck {
     FileSystem fs = null;
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     String topDir = "/srcdat";
     fs = cluster.getFileSystem();
     cluster.waitActive();
@@ -632,7 +642,9 @@ public class TestFsck {
         setNumFiles(4).build();
     FileSystem fs = null;
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     String topDir = "/srcdat";
     String randomString = "HADOOP  ";
     fs = cluster.getFileSystem();
@@ -685,7 +697,8 @@ public class TestFsck {
     final int numAllUnits = dataBlocks + ecPolicy.getNumParityUnits();
     int blockSize = 2 * cellSize;
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).numDataNodes(
         numAllUnits + 1).build();
     String topDir = "/myDir";
     cluster.waitActive();
@@ -776,7 +789,9 @@ public class TestFsck {
     String outStr = null;
     short factor = 1;
 
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     Path file1 = new Path("/testCorruptBlock");
@@ -847,7 +862,9 @@ public class TestFsck {
     Random random = new Random();
     String outStr = null;
     short factor = 1;
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(2).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     Path file1 = new Path("/testUnderMinReplicatedBlock");
@@ -919,9 +936,9 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
     DistributedFileSystem dfs;
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-            .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
 
@@ -1068,7 +1085,8 @@ public class TestFsck {
   @Test
   public void testFsckError() throws Exception {
     // bring up a one-node cluster
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     String fileName = "/test.txt";
     Path filePath = new Path(fileName);
     FileSystem fs = cluster.getFileSystem();
@@ -1100,7 +1118,8 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
     FileSystem fs = null;
 
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     DFSTestUtil util = new DFSTestUtil.Builder().
@@ -1163,7 +1182,8 @@ public class TestFsck {
   @Test
   public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception {
     // bring up a one-node cluster
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     String fileName = "/test.txt";
     Path filePath = new Path(fileName);
     FileSystem fs = cluster.getFileSystem();
@@ -1207,8 +1227,9 @@ public class TestFsck {
     DistributedFileSystem dfs = null;
     
     // Startup a minicluster
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numReplicas).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numReplicas).build();
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
@@ -1268,9 +1289,9 @@ public class TestFsck {
     DistributedFileSystem dfs = null;
     
     // Startup a minicluster
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-        .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
@@ -1377,7 +1398,9 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
         precision);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     fs = cluster.getFileSystem();
     final String fileName = "/srcdat";
     util.createFiles(fs, fileName);
@@ -1404,7 +1427,8 @@ public class TestFsck {
    */
   @Test
   public void testFsckForSnapshotFiles() throws Exception {
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).numDataNodes(1)
         .build();
     String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots",
         "-files");
@@ -1439,9 +1463,9 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
 
     DistributedFileSystem dfs = null;
-    cluster =
-      new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-        .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
@@ -1494,9 +1518,9 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
 
     DistributedFileSystem dfs;
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-            .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
@@ -1579,7 +1603,8 @@ public class TestFsck {
         replFactor);
 
     DistributedFileSystem dfs;
-    cluster = new MiniDFSCluster.Builder(conf)
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(numDn)
         .hosts(hosts)
         .racks(racks)
@@ -1700,9 +1725,9 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
     DistributedFileSystem dfs = null;
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-            .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
@@ -1769,7 +1794,8 @@ public class TestFsck {
    */
   @Test
   public void testStoragePoliciesCK() throws Exception {
-    cluster = new MiniDFSCluster.Builder(conf)
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(3)
         .storageTypes(
             new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
@@ -1812,9 +1838,9 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
     DistributedFileSystem dfs;
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-            .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
@@ -1894,7 +1920,8 @@ public class TestFsck {
         replFactor);
 
     DistributedFileSystem dfs;
-    cluster = new MiniDFSCluster.Builder(conf)
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(numDn)
         .hosts(hosts)
         .racks(racks)
@@ -2002,7 +2029,9 @@ public class TestFsck {
     int parityBlocks =
         StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
     int totalSize = dataBlocks + parityBlocks;
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(totalSize).build();
     fs = cluster.getFileSystem();
     fs.enableErasureCodingPolicy(
         StripedFileTestUtil.getDefaultECPolicy().getName());
@@ -2069,7 +2098,8 @@ public class TestFsck {
 
     int numFiles = 3;
     int numSnapshots = 0;
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     cluster.waitActive();
     hdfs = cluster.getFileSystem();
     DFSTestUtil util = new DFSTestUtil.Builder().
@@ -2149,7 +2179,8 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replication);
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     DistributedFileSystem dfs = cluster.getFileSystem();
     cluster.waitActive();
 
@@ -2244,6 +2275,7 @@ public class TestFsck {
     HostsFileWriter hostsFileWriter = new HostsFileWriter();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
+    conf.set(HDFS_MINIDFS_BASEDIR, GenericTestUtils.getRandomizedTempPath());
     if (defineUpgradeDomain) {
       conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
           CombinedHostFileManager.class, HostConfigManager.class);
@@ -2295,7 +2327,8 @@ public class TestFsck {
         StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
     int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
     int totalSize = dataBlocks + parityBlocks;
-    cluster = new MiniDFSCluster.Builder(conf)
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(totalSize).build();
     fs = cluster.getFileSystem();
     fs.enableErasureCodingPolicy(
@@ -2366,7 +2399,8 @@ public class TestFsck {
         StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
     int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
     int totalSize = dataBlocks + parityBlocks;
-    cluster = new MiniDFSCluster.Builder(conf)
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(totalSize).build();
     fs = cluster.getFileSystem();
     fs.enableErasureCodingPolicy(
@@ -2427,7 +2461,8 @@ public class TestFsck {
   @Test(timeout = 300000)
   public void testFsckCorruptWhenOneReplicaIsCorrupt()
       throws Exception {
-    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf,
+        new File(GenericTestUtils.getRandomizedTempPath()))
         .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(2)
         .build()) {
       cluster.waitActive();
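
The pattern applied throughout this patch can be summarized with a small,
hypothetical test sketch (not part of the change; TestExample is an invented
name): every MiniDFSCluster is built against a randomized temporary base
directory instead of the shared default, so re-run or concurrent tests,
notably on Windows where open handles can block cleanup, do not collide on
leftover storage paths.

  import java.io.File;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.MiniDFSCluster;
  import org.apache.hadoop.test.GenericTestUtils;
  import org.junit.Test;

  public class TestExample {
    @Test
    public void testWithIsolatedBaseDir() throws Exception {
      Configuration conf = new Configuration();
      // Each test gets its own base directory under the test build directory.
      File baseDir = new File(GenericTestUtils.getRandomizedTempPath());
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, baseDir)
          .numDataNodes(1).build();
      try {
        cluster.waitActive();
        // ... exercise the cluster ...
      } finally {
        cluster.shutdown();
      }
    }
  }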




[14/50] [abbrv] hadoop git commit: HADOOP-15239 S3ABlockOutputStream.flush() be no-op when stream closed. Contributed by Gabor Bota.

Posted by xk...@apache.org.
HADOOP-15239 S3ABlockOutputStream.flush() be no-op when stream closed.  Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/919865a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/919865a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/919865a3

Branch: refs/heads/HDFS-12943
Commit: 919865a34bd5c3c99603993a0410846a97975869
Parents: fc074a3
Author: Aaron Fabbri <fa...@apache.org>
Authored: Mon Apr 30 16:02:57 2018 -0700
Committer: Aaron Fabbri <fa...@apache.org>
Committed: Mon Apr 30 16:02:57 2018 -0700

----------------------------------------------------------------------
 .../hadoop/fs/s3a/S3ABlockOutputStream.java     |  7 ++-
 .../hadoop/fs/s3a/TestS3ABlockOutputStream.java | 66 ++++++++++++++++++++
 2 files changed, 72 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/919865a3/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
index 96de8e4..bdffed4 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
@@ -238,7 +238,12 @@ class S3ABlockOutputStream extends OutputStream implements
    */
   @Override
   public synchronized void flush() throws IOException {
-    checkOpen();
+    try {
+      checkOpen();
+    } catch (IOException e) {
+      LOG.warn("Stream closed: " + e.getMessage());
+      return;
+    }
     S3ADataBlocks.DataBlock dataBlock = getActiveBlock();
     if (dataBlock != null) {
       dataBlock.flush();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/919865a3/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java
new file mode 100644
index 0000000..ff176f5
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import org.apache.hadoop.fs.s3a.commit.PutTracker;
+import org.apache.hadoop.util.Progressable;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+
+/**
+ * Unit tests for {@link S3ABlockOutputStream}.
+ */
+public class TestS3ABlockOutputStream extends AbstractS3AMockTest {
+
+  private S3ABlockOutputStream stream;
+
+  @Before
+  public void setUp() throws Exception {
+    ExecutorService executorService = mock(ExecutorService.class);
+    Progressable progressable = mock(Progressable.class);
+    S3ADataBlocks.BlockFactory blockFactory =
+        mock(S3ADataBlocks.BlockFactory.class);
+    long blockSize = Constants.DEFAULT_MULTIPART_SIZE;
+    S3AInstrumentation.OutputStreamStatistics statistics = null;
+    WriteOperationHelper oHelper = mock(WriteOperationHelper.class);
+    PutTracker putTracker = mock(PutTracker.class);
+    stream = spy(new S3ABlockOutputStream(fs, "", executorService,
+      progressable, blockSize, blockFactory, statistics, oHelper,
+      putTracker));
+  }
+
+  @Test
+  public void testFlushNoOpWhenStreamClosed() throws Exception {
+    doThrow(new IOException()).when(stream).checkOpen();
+
+    try {
+      stream.flush();
+    } catch (Exception e){
+      fail("Should not have any exception.");
+    }
+  }
+}
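
Illustrative usage (a sketch under stated assumptions, not part of the patch):
after this change, a flush() issued once the stream has been closed is logged
and swallowed instead of surfacing an IOException, so caller code of the
following shape no longer fails. The bucket and key names are placeholders and
an S3A-configured environment is assumed.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class FlushAfterCloseExample {
    public static void main(String[] args) throws Exception {
      // Assumes S3A credentials and the example bucket are configured.
      Path path = new Path("s3a://example-bucket/tmp/flush-demo");
      FileSystem fs = path.getFileSystem(new Configuration());
      FSDataOutputStream out = fs.create(path);
      out.write(new byte[]{1, 2, 3});
      out.close();
      // This delegates down to S3ABlockOutputStream.flush(); before the patch
      // its checkOpen() threw, now the closed stream only logs a warning.
      out.flush();
    }
  }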




[23/50] [abbrv] hadoop git commit: MAPREDUCE-7086. Add config to allow FileInputFormat to ignore directories when recursive=false. Contributed by Sergey Shelukhin

Posted by xk...@apache.org.
MAPREDUCE-7086. Add config to allow FileInputFormat to ignore directories when recursive=false. Contributed by Sergey Shelukhin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68c6ec71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68c6ec71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68c6ec71

Branch: refs/heads/HDFS-12943
Commit: 68c6ec719da8e79ada31c8f3a82124f90b9a71fd
Parents: 24eeea8
Author: Jason Lowe <jl...@apache.org>
Authored: Tue May 1 16:19:53 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue May 1 16:19:53 2018 -0500

----------------------------------------------------------------------
 .../apache/hadoop/mapred/FileInputFormat.java   | 25 ++++++++++++++------
 .../mapreduce/lib/input/FileInputFormat.java    |  8 +++++++
 .../hadoop/mapred/TestFileInputFormat.java      | 17 ++++++++++++-
 .../lib/input/TestFileInputFormat.java          | 12 ++++++++++
 4 files changed, 54 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c6ec71/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index b0ec979..fe43991 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -78,10 +78,13 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
 
   public static final String NUM_INPUT_FILES =
     org.apache.hadoop.mapreduce.lib.input.FileInputFormat.NUM_INPUT_FILES;
-  
+
   public static final String INPUT_DIR_RECURSIVE = 
     org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE;
 
+  public static final String INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS =
+    org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS;
+
 
   private static final double SPLIT_SLOP = 1.1;   // 10% slop
 
@@ -319,16 +322,24 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
   public InputSplit[] getSplits(JobConf job, int numSplits)
     throws IOException {
     StopWatch sw = new StopWatch().start();
-    FileStatus[] files = listStatus(job);
-    
+    FileStatus[] stats = listStatus(job);
+
     // Save the number of input files for metrics/loadgen
-    job.setLong(NUM_INPUT_FILES, files.length);
+    job.setLong(NUM_INPUT_FILES, stats.length);
     long totalSize = 0;                           // compute total size
-    for (FileStatus file: files) {                // check we have valid files
+    boolean ignoreDirs = !job.getBoolean(INPUT_DIR_RECURSIVE, false)
+      && job.getBoolean(INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, false);
+
+    List<FileStatus> files = new ArrayList<>(stats.length);
+    for (FileStatus file: stats) {                // check we have valid files
       if (file.isDirectory()) {
-        throw new IOException("Not a file: "+ file.getPath());
+        if (!ignoreDirs) {
+          throw new IOException("Not a file: "+ file.getPath());
+        }
+      } else {
+        files.add(file);
+        totalSize += file.getLen();
       }
-      totalSize += file.getLen();
     }
 
     long goalSize = totalSize / (numSplits == 0 ? 1 : numSplits);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c6ec71/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
index 9868e8e..e2d8e6f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
@@ -76,6 +76,8 @@ public abstract class FileInputFormat<K, V> extends InputFormat<K, V> {
     "mapreduce.input.fileinputformat.numinputfiles";
   public static final String INPUT_DIR_RECURSIVE =
     "mapreduce.input.fileinputformat.input.dir.recursive";
+  public static final String INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS =
+    "mapreduce.input.fileinputformat.input.dir.nonrecursive.ignore.subdirs";
   public static final String LIST_STATUS_NUM_THREADS =
       "mapreduce.input.fileinputformat.list-status.num-threads";
   public static final int DEFAULT_LIST_STATUS_NUM_THREADS = 1;
@@ -392,7 +394,13 @@ public abstract class FileInputFormat<K, V> extends InputFormat<K, V> {
     // generate splits
     List<InputSplit> splits = new ArrayList<InputSplit>();
     List<FileStatus> files = listStatus(job);
+
+    boolean ignoreDirs = !getInputDirRecursive(job)
+      && job.getConfiguration().getBoolean(INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, false);
     for (FileStatus file: files) {
+      if (ignoreDirs && file.isDirectory()) {
+        continue;
+      }
       Path path = file.getPath();
       long length = file.getLen();
       if (length != 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c6ec71/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java
index d322011..879cd3d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java
@@ -102,7 +102,22 @@ public class TestFileInputFormat {
         1, mockFs.numListLocatedStatusCalls);
     FileSystem.closeAll();
   }
-  
+
+  @Test
+  public void testIgnoreDirs() throws Exception {
+    Configuration conf = getConfiguration();
+    conf.setBoolean(FileInputFormat.INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, true);
+    conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
+    conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR, "test:///a1");
+    MockFileSystem mockFs = (MockFileSystem) new Path("test:///").getFileSystem(conf);
+    JobConf job = new JobConf(conf);
+    TextInputFormat fileInputFormat = new TextInputFormat();
+    fileInputFormat.configure(job);
+    InputSplit[] splits = fileInputFormat.getSplits(job, 1);
+    Assert.assertEquals("Input splits are not correct", 1, splits.length);
+    FileSystem.closeAll();
+  }
+
   @Test
   public void testSplitLocationInfo() throws Exception {
     Configuration conf = getConfiguration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c6ec71/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
index 4c847fa..3897a9b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
@@ -124,6 +124,18 @@ public class TestFileInputFormat {
   }
 
   @Test
+  public void testNumInputFilesIgnoreDirs() throws Exception {
+    Configuration conf = getConfiguration();
+    conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
+    conf.setBoolean(FileInputFormat.INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, true);
+    Job job = Job.getInstance(conf);
+    FileInputFormat<?, ?> fileInputFormat = new TextInputFormat();
+    List<InputSplit> splits = fileInputFormat.getSplits(job);
+    Assert.assertEquals("Input splits are not correct", 1, splits.size());
+    verifySplits(Lists.newArrayList("test:/a1/file1"), splits);
+  }
+
+  @Test
   public void testListLocatedStatus() throws Exception {
     Configuration conf = getConfiguration();
     conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
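
A hypothetical driver showing the new switch from the job side (a sketch, not
part of the patch; the class name IgnoreSubdirsExample and the input path are
placeholders): with recursion left off, directories found directly under the
input path are skipped instead of failing split computation with
"Not a file: ...".

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
  import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

  public class IgnoreSubdirsExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // New key added by this change; only consulted when recursion is off.
      conf.setBoolean(FileInputFormat.INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, true);
      Job job = Job.getInstance(conf, "ignore-subdirs-demo");
      job.setInputFormatClass(TextInputFormat.class);
      FileInputFormat.addInputPath(job, new Path("/data/input"));
      // ... configure mapper, reducer and output path as usual, then:
      // job.waitForCompletion(true);
    }
  }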




[33/50] [abbrv] hadoop git commit: YARN-8209. Fixed NPE in Yarn Service deletion. Contributed by Eric Badger

Posted by xk...@apache.org.
YARN-8209.  Fixed NPE in Yarn Service deletion.
            Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/883f6822
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/883f6822
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/883f6822

Branch: refs/heads/HDFS-12943
Commit: 883f68222a9cfd06f79a8fcd75ec9fef00abc035
Parents: 19ae588
Author: Eric Yang <ey...@apache.org>
Authored: Wed May 2 14:33:31 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Wed May 2 14:33:31 2018 -0400

----------------------------------------------------------------------
 .../linux/privileged/PrivilegedOperation.java   |  4 +-
 .../runtime/DockerLinuxContainerRuntime.java    | 21 +++++----
 .../linux/runtime/docker/DockerClient.java      |  7 ++-
 .../linux/runtime/docker/DockerCommand.java     | 32 +++++++++++++
 .../runtime/docker/DockerCommandExecutor.java   | 12 ++---
 .../runtime/docker/DockerInspectCommand.java    | 19 ++++++++
 .../linux/runtime/docker/DockerRmCommand.java   | 16 +++++++
 .../impl/container-executor.c                   | 28 ++++++++++++
 .../impl/container-executor.h                   | 10 ++++-
 .../main/native/container-executor/impl/main.c  | 47 ++++++++++++++++++--
 .../docker/TestDockerCommandExecutor.java       | 31 +++++++------
 11 files changed, 184 insertions(+), 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/883f6822/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
index 189c0d0..92a82e8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
@@ -54,7 +54,9 @@ public class PrivilegedOperation {
     GPU("--module-gpu"),
     FPGA("--module-fpga"),
     LIST_AS_USER(""), // no CLI switch supported yet.
-    ADD_NUMA_PARAMS(""); // no CLI switch supported yet.
+    ADD_NUMA_PARAMS(""), // no CLI switch supported yet.
+    REMOVE_DOCKER_CONTAINER("--remove-docker-container"),
+    INSPECT_DOCKER_CONTAINER("--inspect-docker-container");
 
     private final String option;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/883f6822/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 9c05c59..ec1d055 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerCommand;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerCommandExecutor;
@@ -384,7 +385,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       Container container) throws ContainerExecutionException {
     try {
       String commandFile = dockerClient.writeCommandToTempFile(
-          dockerVolumeCommand, container, nmContext);
+          dockerVolumeCommand, container.getContainerId(), nmContext);
       PrivilegedOperation privOp = new PrivilegedOperation(
           PrivilegedOperation.OperationType.RUN_DOCKER_CMD);
       privOp.appendArgs(commandFile);
@@ -734,6 +735,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   public void launchContainer(ContainerRuntimeContext ctx)
       throws ContainerExecutionException {
     Container container = ctx.getContainer();
+    ContainerId containerId = container.getContainerId();
     Map<String, String> environment = container.getLaunchContext()
         .getEnvironment();
     String imageName = environment.get(ENV_DOCKER_CONTAINER_IMAGE);
@@ -750,7 +752,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 
     validateImageName(imageName);
 
-    String containerIdStr = container.getContainerId().toString();
+    String containerIdStr = containerId.toString();
     String runAsUser = ctx.getExecutionAttribute(RUN_AS_USER);
     String dockerRunAsUser = runAsUser;
     Path containerWorkDir = ctx.getExecutionAttribute(CONTAINER_WORK_DIR);
@@ -908,7 +910,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     }
 
     String commandFile = dockerClient.writeCommandToTempFile(runCommand,
-        container, nmContext);
+        containerId, nmContext);
     PrivilegedOperation launchOp = buildLaunchOp(ctx,
         commandFile, runCommand);
 
@@ -927,8 +929,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   @Override
   public void relaunchContainer(ContainerRuntimeContext ctx)
       throws ContainerExecutionException {
-    Container container = ctx.getContainer();
-    String containerIdStr = container.getContainerId().toString();
+    ContainerId containerId = ctx.getContainer().getContainerId();
+    String containerIdStr = containerId.toString();
     // Check to see if the container already exists for relaunch
     DockerCommandExecutor.DockerContainerStatus containerStatus =
         DockerCommandExecutor.getContainerStatus(containerIdStr, conf,
@@ -937,7 +939,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
         DockerCommandExecutor.isStartable(containerStatus)) {
       DockerStartCommand startCommand = new DockerStartCommand(containerIdStr);
       String commandFile = dockerClient.writeCommandToTempFile(startCommand,
-          container, nmContext);
+          containerId, nmContext);
       PrivilegedOperation launchOp = buildLaunchOp(ctx, commandFile,
           startCommand);
 
@@ -1042,12 +1044,13 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   // ipAndHost[1] contains the hostname.
   @Override
   public String[] getIpAndHost(Container container) {
-    String containerId = container.getContainerId().toString();
+    ContainerId containerId = container.getContainerId();
+    String containerIdStr = containerId.toString();
     DockerInspectCommand inspectCommand =
-        new DockerInspectCommand(containerId).getIpAndHost();
+        new DockerInspectCommand(containerIdStr).getIpAndHost();
     try {
       String commandFile = dockerClient.writeCommandToTempFile(inspectCommand,
-          container, nmContext);
+          containerId, nmContext);
       PrivilegedOperation privOp = new PrivilegedOperation(
           PrivilegedOperation.OperationType.RUN_DOCKER_CMD);
       privOp.appendArgs(commandFile);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/883f6822/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
index c55b83b..7bd4546 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
 import org.slf4j.Logger;
@@ -104,9 +103,9 @@ public final class DockerClient {
     }
   }
 
-  public String writeCommandToTempFile(DockerCommand cmd, Container container,
-      Context nmContext) throws ContainerExecutionException {
-    ContainerId containerId = container.getContainerId();
+  public String writeCommandToTempFile(DockerCommand cmd,
+      ContainerId containerId, Context nmContext)
+      throws ContainerExecutionException {
     String filePrefix = containerId.toString();
     ApplicationId appId = containerId.getApplicationAttemptId()
         .getApplicationId();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/883f6822/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
index 0124c83..366457d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
@@ -22,7 +22,12 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -104,4 +109,31 @@ public abstract class DockerCommand {
       addCommandArguments("docker-config", clientConfigDir);
     }
   }
+
+  /**
+   * Prepare the privileged operation object that will be used to invoke
+   * the container-executor.
+   *
+   * @param dockerCommand Specific command to be run by docker.
+   * @param containerName name (ID) of the container the command acts on.
+   * @param env the container launch environment.
+   * @param conf the NodeManager configuration.
+   * @param nmContext the NodeManager context.
+   * @return the PrivilegedOperation object to be used.
+   * @throws ContainerExecutionException if writing the command file fails.
+   */
+  public PrivilegedOperation preparePrivilegedOperation(
+      DockerCommand dockerCommand, String containerName, Map<String,
+      String> env, Configuration conf, Context nmContext)
+      throws ContainerExecutionException {
+    DockerClient dockerClient = new DockerClient(conf);
+    String commandFile =
+        dockerClient.writeCommandToTempFile(dockerCommand,
+        ContainerId.fromString(containerName),
+        nmContext);
+    PrivilegedOperation dockerOp = new PrivilegedOperation(
+        PrivilegedOperation.OperationType.RUN_DOCKER_CMD);
+    dockerOp.appendArgs(commandFile);
+    return dockerOp;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/883f6822/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
index 6abe1cb..8a4888c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
@@ -17,7 +17,6 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
@@ -80,14 +79,9 @@ public final class DockerCommandExecutor {
       PrivilegedOperationExecutor privilegedOperationExecutor,
       boolean disableFailureLogging, Context nmContext)
       throws ContainerExecutionException {
-    DockerClient dockerClient = new DockerClient(conf);
-    String commandFile =
-        dockerClient.writeCommandToTempFile(dockerCommand,
-        nmContext.getContainers().get(ContainerId.fromString(containerId)),
-        nmContext);
-    PrivilegedOperation dockerOp = new PrivilegedOperation(
-        PrivilegedOperation.OperationType.RUN_DOCKER_CMD);
-    dockerOp.appendArgs(commandFile);
+    PrivilegedOperation dockerOp = dockerCommand.preparePrivilegedOperation(
+        dockerCommand, containerId, env, conf, nmContext);
+
     if (disableFailureLogging) {
       dockerOp.disableFailureLogging();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/883f6822/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerInspectCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerInspectCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerInspectCommand.java
index d27f74d0..3ed9c18 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerInspectCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerInspectCommand.java
@@ -20,12 +20,19 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
+
+import java.util.Map;
+
 /**
  * Encapsulates the docker inspect command and its command
  * line arguments.
  */
 public class DockerInspectCommand extends DockerCommand {
   private static final String INSPECT_COMMAND = "inspect";
+  private String commandArguments;
 
   public DockerInspectCommand(String containerName) {
     super(INSPECT_COMMAND);
@@ -34,6 +41,7 @@ public class DockerInspectCommand extends DockerCommand {
 
   public DockerInspectCommand getContainerStatus() {
     super.addCommandArguments("format", "{{.State.Status}}");
+    this.commandArguments = "--format={{.State.Status}}";
     return this;
   }
 
@@ -43,6 +51,17 @@ public class DockerInspectCommand extends DockerCommand {
     // cannot parse the arguments correctly.
     super.addCommandArguments("format", "{{range(.NetworkSettings.Networks)}}"
         + "{{.IPAddress}},{{end}}{{.Config.Hostname}}");
+    this.commandArguments = "--format={{range(.NetworkSettings.Networks)}}"
+        + "{{.IPAddress}},{{end}}{{.Config.Hostname}}";
     return this;
   }
+  @Override
+  public PrivilegedOperation preparePrivilegedOperation(
+      DockerCommand dockerCommand, String containerName, Map<String,
+      String> env, Configuration conf, Context nmContext) {
+    PrivilegedOperation dockerOp = new PrivilegedOperation(
+        PrivilegedOperation.OperationType.INSPECT_DOCKER_CONTAINER);
+    dockerOp.appendArgs(commandArguments, containerName);
+    return dockerOp;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/883f6822/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRmCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRmCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRmCommand.java
index dcfe777..3a02982 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRmCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRmCommand.java
@@ -16,6 +16,12 @@
  */
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
+
+import java.util.Map;
+
 /**
  * Encapsulates the docker rm command and its command
  * line arguments.
@@ -27,4 +33,14 @@ public class DockerRmCommand extends DockerCommand {
     super(RM_COMMAND);
     super.addCommandArguments("name", containerName);
   }
+
+  @Override
+  public PrivilegedOperation preparePrivilegedOperation(
+      DockerCommand dockerCommand, String containerName, Map<String,
+      String> env, Configuration conf, Context nmContext) {
+    PrivilegedOperation dockerOp = new PrivilegedOperation(
+        PrivilegedOperation.OperationType.REMOVE_DOCKER_CONTAINER);
+    dockerOp.appendArgs(containerName);
+    return dockerOp;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/883f6822/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index d9ed070..6b4ec0c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1332,6 +1332,34 @@ int run_docker(const char *command_file) {
   return exit_code;
 }
 
+int exec_docker_command(char *docker_command, char **argv,
+    int argc, int optind) {
+  int i;
+  char* docker_binary = get_docker_binary(&CFG);
+  size_t command_size = argc - optind + 2;
+
+  char **args = alloc_and_clear_memory(command_size + 1, sizeof(char *));
+  args[0] = docker_binary;
+  args[1] = docker_command;
+  for(i = 2; i < command_size; i++) {
+    args[i] = (char *) argv[i];
+  }
+  args[i] = NULL;
+
+  execvp(docker_binary, args);
+
+  // will only get here if execvp fails
+  fprintf(ERRORFILE, "Couldn't execute the container launch with args %s - %s\n",
+      docker_binary, strerror(errno));
+  fflush(LOGFILE);
+  fflush(ERRORFILE);
+
+  free(docker_binary);
+  free(args);
+
+  return DOCKER_RUN_FAILED;
+}
+
 int create_script_paths(const char *work_dir,
   const char *script_name, const char *cred_file,
   char** script_file_dest, char** cred_file_dest,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/883f6822/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
index 7c3ed77..47c4221 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
@@ -47,7 +47,9 @@ enum operations {
   RUN_AS_USER_DELETE = 9,
   RUN_AS_USER_LAUNCH_DOCKER_CONTAINER = 10,
   RUN_DOCKER = 11,
-  RUN_AS_USER_LIST = 12
+  RUN_AS_USER_LIST = 12,
+  REMOVE_DOCKER_CONTAINER = 13,
+  INSPECT_DOCKER_CONTAINER = 14
 };
 
 #define NM_GROUP_KEY "yarn.nodemanager.linux-container-executor.group"
@@ -263,6 +265,12 @@ int is_docker_support_enabled();
  */
 int run_docker(const char *command_file);
 
+/**
+ * Run a docker command without a command file
+ */
+int exec_docker_command(char *docker_command, char **argv,
+    int argc, int optind);
+
 /*
  * Compile the regex_str and determine if the input string matches.
  * Return 0 on match, 1 if non-match.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/883f6822/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index b69546a..c54fd3e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -36,7 +36,7 @@ static void display_usage(FILE *stream) {
   fprintf(stream,
     "Usage: container-executor --checksetup\n"
     "       container-executor --mount-cgroups <hierarchy> "
-    "<controller=path>...\n" );
+    "<controller=path>\n" );
 
   if(is_tc_support_enabled()) {
     fprintf(stream,
@@ -52,10 +52,15 @@ static void display_usage(FILE *stream) {
 
   if(is_docker_support_enabled()) {
     fprintf(stream,
-      "       container-executor --run-docker <command-file>\n");
+      "       container-executor --run-docker <command-file>\n"
+      "       container-executor --remove-docker-container <container_id>\n"
+      "       container-executor --inspect-docker-container <container_id>\n");
   } else {
     fprintf(stream,
-      "[DISABLED] container-executor --run-docker <command-file>\n");
+      "[DISABLED] container-executor --run-docker <command-file>\n"
+      "[DISABLED] container-executor --remove-docker-container <container_id>\n"
+      "[DISABLED] container-executor --inspect-docker-container "
+      "<format> ... <container_id>\n");
   }
 
   fprintf(stream,
@@ -331,6 +336,36 @@ static int validate_arguments(int argc, char **argv , int *operation) {
     }
   }
 
+  if (strcmp("--remove-docker-container", argv[1]) == 0) {
+    if(is_docker_support_enabled()) {
+      if (argc != 3) {
+        display_usage(stdout);
+        return INVALID_ARGUMENT_NUMBER;
+      }
+      optind++;
+      *operation = REMOVE_DOCKER_CONTAINER;
+      return 0;
+    } else {
+        display_feature_disabled_message("docker");
+        return FEATURE_DISABLED;
+    }
+  }
+
+  if (strcmp("--inspect-docker-container", argv[1]) == 0) {
+    if(is_docker_support_enabled()) {
+      if (argc != 4) {
+        display_usage(stdout);
+        return INVALID_ARGUMENT_NUMBER;
+      }
+      optind++;
+      *operation = INSPECT_DOCKER_CONTAINER;
+      return 0;
+    } else {
+        display_feature_disabled_message("docker");
+        return FEATURE_DISABLED;
+    }
+  }
+
   /* Now we have to validate 'run as user' operations that don't use
     a 'long option' - we should fix this at some point. The validation/argument
     parsing here is extensive enough that it done in a separate function */
@@ -561,6 +596,12 @@ int main(int argc, char **argv) {
   case RUN_DOCKER:
     exit_code = run_docker(cmd_input.docker_command_file);
     break;
+  case REMOVE_DOCKER_CONTAINER:
+    exit_code = exec_docker_command("rm", argv, argc, optind);
+    break;
+  case INSPECT_DOCKER_CONTAINER:
+    exit_code = exec_docker_command("inspect", argv, argc, optind);
+    break;
   case RUN_AS_USER_INITIALIZE_CONTAINER:
     exit_code = set_user(cmd_input.run_as_user_name);
     if (exit_code != 0) {

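Taken together with exec_docker_command above, these flags bypass the command-file indirection entirely: container-executor --remove-docker-container <container_id> ends up exec'ing docker rm <container_id>, and container-executor --inspect-docker-container --format={{.State.Status}} <container_id> ends up exec'ing docker inspect --format={{.State.Status}} <container_id>, the format argument being the one built by DockerInspectCommand earlier in this patch.
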
http://git-wip-us.apache.org/repos/asf/hadoop/blob/883f6822/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
index a230d4d..50d00bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
@@ -153,14 +153,14 @@ public class TestDockerCommandExecutor {
         env, configuration, mockExecutor, false, nmContext);
     List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
         .capturePrivilegedOperations(mockExecutor, 1, true);
-    List<String> dockerCommands = getValidatedDockerCommands(ops);
+    PrivilegedOperation privOp = ops.get(0);
+    List<String> args = privOp.getArguments();
     assertEquals(1, ops.size());
-    assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
-        ops.get(0).getOperationType().name());
-    assertEquals(3, dockerCommands.size());
-    assertEquals("[docker-command-execution]", dockerCommands.get(0));
-    assertEquals("  docker-command=rm", dockerCommands.get(1));
-    assertEquals("  name=" + MOCK_CONTAINER_ID, dockerCommands.get(2));
+    assertEquals(PrivilegedOperation.OperationType.
+        REMOVE_DOCKER_CONTAINER.name(),
+        privOp.getOperationType().name());
+    assertEquals(1, args.size());
+    assertEquals(MOCK_CONTAINER_ID, args.get(0));
   }
 
   @Test
@@ -188,16 +188,15 @@ public class TestDockerCommandExecutor {
         env, configuration, mockExecutor, false, nmContext);
     List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
         .capturePrivilegedOperations(mockExecutor, 1, true);
-    List<String> dockerCommands = getValidatedDockerCommands(ops);
+    PrivilegedOperation privOp = ops.get(0);
+    List<String> args = privOp.getArguments();
     assertEquals(1, ops.size());
-    assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
-        ops.get(0).getOperationType().name());
-    assertEquals(4, dockerCommands.size());
-    assertEquals("[docker-command-execution]", dockerCommands.get(0));
-    assertEquals("  docker-command=inspect", dockerCommands.get(1));
-    assertEquals("  format={{.State.Status}}", dockerCommands.get(2));
-    assertEquals("  name=" + MOCK_CONTAINER_ID, dockerCommands.get(3));
-
+    assertEquals(PrivilegedOperation.OperationType.
+        INSPECT_DOCKER_CONTAINER.name(),
+        privOp.getOperationType().name());
+    assertEquals(2, args.size());
+    assertEquals("--format={{.State.Status}}", args.get(0));
+    assertEquals(MOCK_CONTAINER_ID, args.get(1));
   }
 
   @Test




[19/50] [abbrv] hadoop git commit: YARN-8187. [UI2] Individual Node page does not contain breadcrumb trail. Contributed by Zian Chen.

Posted by xk...@apache.org.
YARN-8187. [UI2] Individual Node page does not contain breadcrumb trail. Contributed by Zian Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6139c51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6139c51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6139c51

Branch: refs/heads/HDFS-12943
Commit: d6139c5106a469df72c1551100d550371f6cb7c7
Parents: f0c3dc4
Author: Sunil G <su...@apache.org>
Authored: Tue May 1 14:01:34 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Tue May 1 14:01:34 2018 +0530

----------------------------------------------------------------------
 .../src/main/webapp/app/templates/yarn-node.hbs | 25 ++++++++++++++++++++
 .../webapp/app/templates/yarn-node/info.hbs     |  2 --
 .../app/templates/yarn-node/yarn-nm-gpu.hbs     |  2 --
 3 files changed, 25 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6139c51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
new file mode 100644
index 0000000..d82b175
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
@@ -0,0 +1,25 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+{{breadcrumb-bar breadcrumbs=breadcrumbs}}
+
+<div class="col-md-12 container-fluid yarn-applications-container">
+  <div class="row">
+    {{outlet}}
+  </div>
+</div>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6139c51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/info.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/info.hbs
index ad411c0..a2c708e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/info.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/info.hbs
@@ -16,8 +16,6 @@
   limitations under the License.
 --}}
 
-{{breadcrumb-bar breadcrumbs=breadcrumbs}}
-
 <div class="col-md-12 container-fluid">
   <div class="row">
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6139c51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/yarn-nm-gpu.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/yarn-nm-gpu.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/yarn-nm-gpu.hbs
index 0464cc8..f3aafe5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/yarn-nm-gpu.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/yarn-nm-gpu.hbs
@@ -16,8 +16,6 @@
   limitations under the License.
 --}}
 
-{{breadcrumb-bar breadcrumbs=breadcrumbs}}
-
 <div class="col-md-12 container-fluid">
   <div class="row">
 




[04/50] [abbrv] hadoop git commit: YARN-8221. RMWebServices also need to honor yarn.resourcemanager.display.per-user-apps. Contributed by Sunil G.

Posted by xk...@apache.org.
YARN-8221. RMWebServices also need to honor yarn.resourcemanager.display.per-user-apps. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef3ecc30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef3ecc30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef3ecc30

Branch: refs/heads/HDFS-12943
Commit: ef3ecc308dbea41c6a88bd4d16739c7bbc10cdda
Parents: bff3d7b
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Fri Apr 27 22:58:10 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Fri Apr 27 22:58:10 2018 +0530

----------------------------------------------------------------------
 .../server/resourcemanager/webapp/RMWebServices.java   | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef3ecc30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index d30764d..0564b67 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -228,6 +228,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
 
   @VisibleForTesting
   boolean isCentralizedNodeLabelConfiguration = true;
+  private boolean displayPerUserApps = false;
 
   public final static String DELEGATION_TOKEN_HEADER =
       "Hadoop-YARN-RM-Delegation-Token";
@@ -240,6 +241,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
     this.conf = conf;
     isCentralizedNodeLabelConfiguration =
         YarnConfiguration.isCentralizedNodeLabelConfiguration(conf);
+    this.displayPerUserApps  = conf.getBoolean(
+        YarnConfiguration.DISPLAY_APPS_FOR_LOGGED_IN_USER,
+        YarnConfiguration.DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER);
   }
 
   RMWebServices(ResourceManager rm, Configuration conf,
@@ -608,7 +612,14 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       DeSelectFields deSelectFields = new DeSelectFields();
       deSelectFields.initFields(unselectedFields);
 
-      AppInfo app = new AppInfo(rm, rmapp, hasAccess(rmapp, hsr),
+      boolean allowAccess = hasAccess(rmapp, hsr);
+      // Given RM is configured to display apps per user, skip apps to which
+      // this caller doesn't have access to view.
+      if (displayPerUserApps && !allowAccess) {
+        continue;
+      }
+
+      AppInfo app = new AppInfo(rm, rmapp, allowAccess,
           WebAppUtils.getHttpSchemePrefix(conf), deSelectFields);
       allApps.add(app);
     }

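As a usage sketch, the new filtering is driven entirely by the existing display flag; the values below are illustrative, and the yarn-site.xml key comes from the JIRA subject rather than this hunk:

    // Hypothetical sketch: opt the RM web services into per-user filtering so
    // that getApps() skips applications the caller cannot access.
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.DISPLAY_APPS_FOR_LOGGED_IN_USER, true);
    // Equivalent yarn-site.xml entry (assumed from the subject line):
    //   yarn.resourcemanager.display.per-user-apps = true
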



[39/50] [abbrv] hadoop git commit: HADOOP-15434. Upgrade to ADLS SDK that exposes current timeout.

Posted by xk...@apache.org.
HADOOP-15434. Upgrade to ADLS SDK that exposes current timeout.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85381c7b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85381c7b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85381c7b

Branch: refs/heads/HDFS-12943
Commit: 85381c7b605b5f49664f101cf025e443c300b94c
Parents: e6a80e4
Author: Sean Mackrory <ma...@apache.org>
Authored: Tue May 1 09:47:52 2018 -0600
Committer: Sean Mackrory <ma...@apache.org>
Committed: Wed May 2 21:30:31 2018 -0600

----------------------------------------------------------------------
 hadoop-tools/hadoop-azure-datalake/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85381c7b/hadoop-tools/hadoop-azure-datalake/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml b/hadoop-tools/hadoop-azure-datalake/pom.xml
index 57515b0..5603db9 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -33,7 +33,7 @@
     <minimalJsonVersion>0.9.1</minimalJsonVersion>
     <file.encoding>UTF-8</file.encoding>
     <downloadSources>true</downloadSources>
-    <azure.data.lake.store.sdk.version>2.2.7</azure.data.lake.store.sdk.version>
+    <azure.data.lake.store.sdk.version>2.2.9</azure.data.lake.store.sdk.version>
   </properties>
   <build>
     <plugins>




[38/50] [abbrv] hadoop git commit: YARN-8151. Yarn RM Epoch should wrap around. Contributed by Young Chen.

Posted by xk...@apache.org.
YARN-8151. Yarn RM Epoch should wrap around. Contributed by Young Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6a80e47
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6a80e47
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6a80e47

Branch: refs/heads/HDFS-12943
Commit: e6a80e476d4348a4373e6dd5792d70edff16516f
Parents: 87c23ef
Author: Inigo Goiri <in...@apache.org>
Authored: Wed May 2 17:23:17 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Wed May 2 17:23:17 2018 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java       |  4 ++++
 .../src/main/resources/yarn-default.xml           |  7 +++++++
 .../recovery/FileSystemRMStateStore.java          |  4 ++--
 .../recovery/LeveldbRMStateStore.java             |  2 +-
 .../recovery/MemoryRMStateStore.java              |  2 +-
 .../resourcemanager/recovery/RMStateStore.java    | 18 +++++++++++++++++-
 .../resourcemanager/recovery/ZKRMStateStore.java  |  4 ++--
 .../recovery/RMStateStoreTestBase.java            | 14 ++++++++++++++
 .../recovery/TestFSRMStateStore.java              |  1 +
 .../recovery/TestLeveldbRMStateStore.java         |  1 +
 .../recovery/TestZKRMStateStore.java              |  1 +
 11 files changed, 51 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a80e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8aa136d..5ba2e05 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -188,6 +188,10 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_EPOCH = RM_PREFIX + "epoch";
   public static final long DEFAULT_RM_EPOCH = 0L;
 
+  /** The epoch range before wrap around. 0 disables wrap around. */
+  public static final String RM_EPOCH_RANGE = RM_EPOCH + ".range";
+  public static final long DEFAULT_RM_EPOCH_RANGE = 0;
+
   /** The address of the applications manager interface in the RM.*/
   public static final String RM_ADDRESS = 
     RM_PREFIX + "address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a80e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 85915c2..4eb509f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -677,6 +677,13 @@
   </property>
 
   <property>
+    <description>The range of values above base epoch that the RM will use before
+      wrapping around</description>
+    <name>yarn.resourcemanager.epoch.range</name>
+    <value>0</value>
+  </property>
+
+  <property>
     <description>The list of RM nodes in the cluster when HA is
       enabled. See description of yarn.resourcemanager.ha
       .enabled for full details on how this is used.</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a80e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
index 19297bc..b797283 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
@@ -205,12 +205,12 @@ public class FileSystemRMStateStore extends RMStateStore {
       Epoch epoch = new EpochPBImpl(EpochProto.parseFrom(data));
       currentEpoch = epoch.getEpoch();
       // increment epoch and store it
-      byte[] storeData = Epoch.newInstance(currentEpoch + 1).getProto()
+      byte[] storeData = Epoch.newInstance(nextEpoch(currentEpoch)).getProto()
           .toByteArray();
       updateFile(epochNodePath, storeData, false);
     } else {
       // initialize epoch file with 1 for the next time.
-      byte[] storeData = Epoch.newInstance(currentEpoch + 1).getProto()
+      byte[] storeData = Epoch.newInstance(nextEpoch(currentEpoch)).getProto()
           .toByteArray();
       writeFileWithRetries(epochNodePath, storeData, false);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a80e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
index 36a8dfa..e7fb02f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
@@ -259,7 +259,7 @@ public class LeveldbRMStateStore extends RMStateStore {
       if (data != null) {
         currentEpoch = EpochProto.parseFrom(data).getEpoch();
       }
-      EpochProto proto = Epoch.newInstance(currentEpoch + 1).getProto();
+      EpochProto proto = Epoch.newInstance(nextEpoch(currentEpoch)).getProto();
       db.put(dbKeyBytes, proto.toByteArray());
     } catch (DBException e) {
       throw new IOException(e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a80e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java
index 5041000..219e10a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java
@@ -59,7 +59,7 @@ public class MemoryRMStateStore extends RMStateStore {
   @Override
   public synchronized long getAndIncrementEpoch() throws Exception {
     long currentEpoch = epoch;
-    epoch = epoch + 1;
+    epoch = nextEpoch(epoch);
     return currentEpoch;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a80e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index b4dd378..242b5d0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -104,6 +104,7 @@ public abstract class RMStateStore extends AbstractService {
   protected static final String VERSION_NODE = "RMVersionNode";
   protected static final String EPOCH_NODE = "EpochNode";
   protected long baseEpoch;
+  private long epochRange;
   protected ResourceManager resourceManager;
   private final ReadLock readLock;
   private final WriteLock writeLock;
@@ -732,6 +733,8 @@ public abstract class RMStateStore extends AbstractService {
     // read the base epoch value from conf
     baseEpoch = conf.getLong(YarnConfiguration.RM_EPOCH,
         YarnConfiguration.DEFAULT_RM_EPOCH);
+    epochRange = conf.getLong(YarnConfiguration.RM_EPOCH_RANGE,
+        YarnConfiguration.DEFAULT_RM_EPOCH_RANGE);
     initInternal(conf);
   }
 
@@ -818,7 +821,20 @@ public abstract class RMStateStore extends AbstractService {
    * Get the current epoch of RM and increment the value.
    */
   public abstract long getAndIncrementEpoch() throws Exception;
-  
+
+  /**
+   * Compute the next epoch value by incrementing by one.
+   * Wraps around if the epoch range is exceeded so that,
+   * when federation is enabled, epoch collisions can be avoided.
+   */
+  protected long nextEpoch(long epoch){
+    long epochVal = epoch - baseEpoch + 1;
+    if (epochRange > 0) {
+      epochVal %= epochRange;
+    }
+    return  epochVal + baseEpoch;
+  }
+
   /**
    * Blocking API
    * The derived class must recover state from the store and return a new 

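A small worked example of the nextEpoch() arithmetic above, using the hypothetical values baseEpoch = 10 and epochRange = 10 (the same values the tests below configure):

    // Hypothetical walk-through of the wrap-around logic in nextEpoch().
    long baseEpoch = 10L;    // yarn.resourcemanager.epoch
    long epochRange = 10L;   // yarn.resourcemanager.epoch.range
    long epoch = 19L;        // current epoch read from the state store
    long epochVal = epoch - baseEpoch + 1;   // 10
    if (epochRange > 0) {
      epochVal %= epochRange;                // wraps to 0
    }
    long next = epochVal + baseEpoch;        // 10, i.e. back at the base epoch
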
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a80e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
index 9073910..de1f1ad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
@@ -491,13 +491,13 @@ public class ZKRMStateStore extends RMStateStore {
       Epoch epoch = new EpochPBImpl(EpochProto.parseFrom(data));
       currentEpoch = epoch.getEpoch();
       // increment epoch and store it
-      byte[] storeData = Epoch.newInstance(currentEpoch + 1).getProto()
+      byte[] storeData = Epoch.newInstance(nextEpoch(currentEpoch)).getProto()
           .toByteArray();
       zkManager.safeSetData(epochNodePath, storeData, -1, zkAcl,
           fencingNodePath);
     } else {
       // initialize epoch node with 1 for the next time.
-      byte[] storeData = Epoch.newInstance(currentEpoch + 1).getProto()
+      byte[] storeData = Epoch.newInstance(nextEpoch(currentEpoch)).getProto()
           .toByteArray();
       zkManager.safeCreate(epochNodePath, storeData, zkAcl,
           CreateMode.PERSISTENT, zkAcl, fencingNodePath);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a80e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
index 957d4ce..3454d72 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
@@ -94,6 +94,8 @@ public class RMStateStoreTestBase {
 
   protected final long epoch = 10L;
 
+  private final long epochRange = 10L;
+
   static class TestDispatcher implements Dispatcher, EventHandler<Event> {
 
     ApplicationAttemptId attemptId;
@@ -141,6 +143,10 @@ public class RMStateStoreTestBase {
     boolean attemptExists(RMAppAttempt attempt) throws Exception;
   }
 
+  public long getEpochRange() {
+    return epochRange;
+  }
+
   void waitNotify(TestDispatcher dispatcher) {
     long startTime = System.currentTimeMillis();
     while(!dispatcher.notified) {
@@ -576,6 +582,14 @@ public class RMStateStoreTestBase {
     
     long thirdTimeEpoch = store.getAndIncrementEpoch();
     Assert.assertEquals(epoch + 2, thirdTimeEpoch);
+
+    for (int i = 0; i < epochRange; ++i) {
+      store.getAndIncrementEpoch();
+    }
+    long wrappedEpoch = store.getAndIncrementEpoch();
+    // Epoch should have wrapped around and then incremented once for a total
+    // of + 3
+    Assert.assertEquals(epoch + 3, wrappedEpoch);
   }
 
   public void testAppDeletion(RMStateStoreHelper stateStoreHelper)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a80e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java
index fe4a701..14f5404 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java
@@ -118,6 +118,7 @@ public class TestFSRMStateStore extends RMStateStoreTestBase {
       conf.setLong(YarnConfiguration.FS_RM_STATE_STORE_RETRY_INTERVAL_MS,
               900L);
       conf.setLong(YarnConfiguration.RM_EPOCH, epoch);
+      conf.setLong(YarnConfiguration.RM_EPOCH_RANGE, getEpochRange());
       if (adminCheckEnable) {
         conf.setBoolean(
           YarnConfiguration.YARN_INTERMEDIATE_DATA_ENCRYPTION, true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a80e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java
index afd0c77..576ee7f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java
@@ -83,6 +83,7 @@ public class TestLeveldbRMStateStore extends RMStateStoreTestBase {
   @Test(timeout = 60000)
   public void testEpoch() throws Exception {
     conf.setLong(YarnConfiguration.RM_EPOCH, epoch);
+    conf.setLong(YarnConfiguration.RM_EPOCH_RANGE, getEpochRange());
     LeveldbStateStoreTester tester = new LeveldbStateStoreTester();
     testEpoch(tester);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a80e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
index d8718e0..4cba266 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
@@ -210,6 +210,7 @@ public class TestZKRMStateStore extends RMStateStoreTestBase {
           curatorTestingServer.getConnectString());
       conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode);
       conf.setLong(YarnConfiguration.RM_EPOCH, epoch);
+      conf.setLong(YarnConfiguration.RM_EPOCH_RANGE, getEpochRange());
       this.store = new TestZKRMStateStoreInternal(conf, workingZnode);
       return this.store;
     }




[26/50] [abbrv] hadoop git commit: MAPREDUCE-7073. Optimize TokenCache#obtainTokensForNamenodesInternal

Posted by xk...@apache.org.
MAPREDUCE-7073. Optimize TokenCache#obtainTokensForNamenodesInternal

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a95a452
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a95a452
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a95a452

Branch: refs/heads/HDFS-12943
Commit: 1a95a4524a8c6c7be601ce8b92640a6a76164a2c
Parents: 3726926
Author: Bibin A Chundatt <bi...@apache.org>
Authored: Wed May 2 16:14:28 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed May 2 16:14:28 2018 +0900

----------------------------------------------------------------------
 .../hadoop/mapreduce/security/TokenCache.java     | 14 +++++++++-----
 .../hadoop/mapreduce/security/TestTokenCache.java | 18 +++++++++---------
 2 files changed, 18 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a95a452/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
index 12fced9..1156c67 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -96,8 +97,9 @@ public class TokenCache {
     for(Path p: ps) {
       fsSet.add(p.getFileSystem(conf));
     }
+    String masterPrincipal = Master.getMasterPrincipal(conf);
     for (FileSystem fs : fsSet) {
-      obtainTokensForNamenodesInternal(fs, credentials, conf);
+      obtainTokensForNamenodesInternal(fs, credentials, conf, masterPrincipal);
     }
   }
 
@@ -122,15 +124,17 @@ public class TokenCache {
    * @param conf
    * @throws IOException
    */
-  static void obtainTokensForNamenodesInternal(FileSystem fs, 
-      Credentials credentials, Configuration conf) throws IOException {
+  static void obtainTokensForNamenodesInternal(FileSystem fs,
+      Credentials credentials, Configuration conf, String renewer)
+      throws IOException {
     // RM skips renewing token with empty renewer
     String delegTokenRenewer = "";
     if (!isTokenRenewalExcluded(fs, conf)) {
-      delegTokenRenewer = Master.getMasterPrincipal(conf);
-      if (delegTokenRenewer == null || delegTokenRenewer.length() == 0) {
+      if (StringUtils.isEmpty(renewer)) {
         throw new IOException(
             "Can't get Master Kerberos principal for use as renewer");
+      } else {
+        delegTokenRenewer = renewer;
       }
     }
 

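In effect, the Kerberos renewer principal is now resolved once per call to obtainTokensForNamenodes() rather than once per FileSystem. A minimal sketch of the resulting pattern, with hypothetical fs1/fs2 handles and same-package access as in the test below:

    // Hypothetical sketch: one principal lookup shared by every filesystem
    // the job references, mirroring the loop in obtainTokensForNamenodes().
    String masterPrincipal = Master.getMasterPrincipal(conf);
    for (FileSystem fs : new FileSystem[] {fs1, fs2}) {
      TokenCache.obtainTokensForNamenodesInternal(fs, credentials, conf,
          masterPrincipal);
    }
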
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a95a452/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
index 127f8ae..a44e533 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
@@ -56,8 +56,8 @@ public class TestTokenCache {
   @Test
   public void testObtainTokens() throws Exception {
     Credentials credentials = new Credentials();
-    FileSystem fs = mock(FileSystem.class);  
-    TokenCache.obtainTokensForNamenodesInternal(fs, credentials, conf);
+    FileSystem fs = mock(FileSystem.class);
+    TokenCache.obtainTokensForNamenodesInternal(fs, credentials, conf, renewer);
     verify(fs).addDelegationTokens(eq(renewer), eq(credentials));
   }
 
@@ -105,23 +105,23 @@ public class TestTokenCache {
     checkToken(creds, newerToken1);
     
     // get token for fs1, see that fs2's token was loaded 
-    TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
+    TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf, renewer);
     checkToken(creds, newerToken1, token2);
     
     // get token for fs2, nothing should change since already present
-    TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf);
+    TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf, renewer);
     checkToken(creds, newerToken1, token2);
     
     // get token for fs3, should only add token for fs3
-    TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf);
+    TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf, renewer);
     Token<?> token3 = creds.getToken(new Text(fs3.getCanonicalServiceName()));
     assertTrue(token3 != null);
     checkToken(creds, newerToken1, token2, token3);
     
     // be paranoid, check one last time that nothing changes
-    TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
-    TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf);
-    TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf);
+    TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf, renewer);
+    TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf, renewer);
+    TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf, renewer);
     checkToken(creds, newerToken1, token2, token3);
   }
 
@@ -202,7 +202,7 @@ public class TestTokenCache {
     // wait to set, else the obtain tokens call above will fail with FNF
     conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile);
     creds.writeTokenStorageFile(new Path(binaryTokenFile), conf);
-    TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
+    TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf, renewer);
     String fs_addr = fs1.getCanonicalServiceName();
     Token<?> nnt = TokenCache.getDelegationToken(creds, fs_addr);
     assertNotNull("Token for nn is null", nnt);




[18/50] [abbrv] hadoop git commit: HDDS-13. Refactor StorageContainerManager into separate RPC endpoints. Contributed by Anu Engineer.

Posted by xk...@apache.org.
HDDS-13. Refactor StorageContainerManager into separate RPC endpoints. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0c3dc4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0c3dc4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0c3dc4c

Branch: refs/heads/HDFS-12943
Commit: f0c3dc4cf40575497ca6f29c037e43fa50e0ffdd
Parents: 2d319e3
Author: Anu Engineer <ae...@apache.org>
Authored: Mon Apr 30 21:41:10 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Mon Apr 30 21:41:10 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdds/scm/SCMMXBean.java   |   50 -
 .../org/apache/hadoop/hdds/scm/SCMStorage.java  |   73 -
 .../hdds/scm/StorageContainerManager.java       | 1290 ------------------
 .../scm/StorageContainerManagerHttpServer.java  |   76 --
 .../hadoop/hdds/scm/node/SCMNodeManager.java    |    5 +-
 .../hdds/scm/server/SCMBlockProtocolServer.java |  222 +++
 .../scm/server/SCMClientProtocolServer.java     |  314 +++++
 .../scm/server/SCMDatanodeProtocolServer.java   |  350 +++++
 .../hadoop/hdds/scm/server/SCMMXBean.java       |   50 +
 .../hadoop/hdds/scm/server/SCMStorage.java      |   73 +
 .../scm/server/StorageContainerManager.java     |  722 ++++++++++
 .../StorageContainerManagerHttpServer.java      |   77 ++
 .../hadoop/hdds/scm/server/package-info.java    |   22 +
 .../TestStorageContainerManagerHttpServer.java  |    7 +-
 hadoop-ozone/common/src/main/bin/ozone          |    2 +-
 .../container/TestContainerStateManager.java    |   29 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |   34 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |   10 +-
 .../ozone/TestStorageContainerManager.java      |   20 +-
 .../TestStorageContainerManagerHelper.java      |    2 +-
 .../ozone/ksm/TestContainerReportWithKeys.java  |    2 +-
 .../hadoop/ozone/ksm/TestKeySpaceManager.java   |    8 +-
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java |   13 +-
 .../apache/hadoop/ozone/scm/TestSCMMXBean.java  |    2 +-
 .../apache/hadoop/ozone/scm/TestSCMMetrics.java |   16 +-
 25 files changed, 1912 insertions(+), 1557 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMMXBean.java
deleted file mode 100644
index 17b6814..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMMXBean.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.server.ServiceRuntimeInfo;
-
-import java.util.Map;
-
-/**
- *
- * This is the JMX management interface for scm information.
- */
-@InterfaceAudience.Private
-public interface SCMMXBean extends ServiceRuntimeInfo {
-
-  /**
-   * Get the SCM RPC server port that used to listen to datanode requests.
-   * @return SCM datanode RPC server port
-   */
-  String getDatanodeRpcPort();
-
-  /**
-   * Get the SCM RPC server port that used to listen to client requests.
-   * @return SCM client RPC server port
-   */
-  String getClientRpcPort();
-
-  /**
-   * Get container report info that includes container IO stats of nodes.
-   * @return The datanodeUUid to report json string mapping
-   */
-  Map<String, String> getContainerReport();
-}
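
As an aside for readers tracking the relocation: SCMMXBean is re-added under org.apache.hadoop.hdds.scm.server later in this patch (see the file list above). A minimal sketch of reading its attributes in-process over JMX, once StorageContainerManager has registered the bean, could look like the following; the ObjectName is an assumption inferred from the MBeans.register("StorageContainerManager", "StorageContainerManagerInfo", ...) call elsewhere in this commit and may not match a real deployment exactly.

import java.lang.management.ManagementFactory;
import javax.management.JMX;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.hadoop.hdds.scm.server.SCMMXBean;

public final class ScmMXBeanProbe {
  public static void main(String[] args) throws Exception {
    // Assumed ObjectName; Hadoop's MBeans utility registers beans under the
    // "Hadoop" domain using the service and name passed to register().
    ObjectName name = new ObjectName(
        "Hadoop:service=StorageContainerManager,name=StorageContainerManagerInfo");
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    SCMMXBean scm = JMX.newMXBeanProxy(server, name, SCMMXBean.class);

    System.out.println("Client RPC port:   " + scm.getClientRpcPort());
    System.out.println("Datanode RPC port: " + scm.getDatanodeRpcPort());
    // One JSON stat string per datanode UUID, as described in the javadoc.
    scm.getContainerReport().forEach(
        (uuid, statJson) -> System.out.println(uuid + " -> " + statJson));
  }
}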

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMStorage.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMStorage.java
deleted file mode 100644
index 27e9363..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMStorage.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-import org.apache.hadoop.ozone.common.Storage;
-
-import java.io.IOException;
-import java.util.Properties;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
-import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID;
-import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR;
-
-/**
- * SCMStorage is responsible for management of the StorageDirectories used by
- * the SCM.
- */
-public class SCMStorage extends Storage {
-
-  /**
-   * Construct SCMStorage.
-   * @throws IOException if any directories are inaccessible.
-   */
-  public SCMStorage(OzoneConfiguration conf) throws IOException {
-    super(NodeType.SCM, getOzoneMetaDirPath(conf), STORAGE_DIR);
-  }
-
-  public void setScmId(String scmId) throws IOException {
-    if (getState() == StorageState.INITIALIZED) {
-      throw new IOException("SCM is already initialized.");
-    } else {
-      getStorageInfo().setProperty(SCM_ID, scmId);
-    }
-  }
-
-  /**
-   * Retrieves the SCM ID from the version file.
-   * @return SCM_ID
-   */
-  public String getScmId() {
-    return getStorageInfo().getProperty(SCM_ID);
-  }
-
-  @Override
-  protected Properties getNodeProperties() {
-    String scmId = getScmId();
-    if (scmId == null) {
-      scmId = UUID.randomUUID().toString();
-    }
-    Properties scmProperties = new Properties();
-    scmProperties.setProperty(SCM_ID, scmId);
-    return scmProperties;
-  }
-
-}
\ No newline at end of file
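
SCMStorage likewise moves to the server package with the same behaviour. A rough, self-contained sketch of how it is driven during "ozone scm -init", condensed from StorageContainerManager#scmInit later in this message (error handling and argument parsing omitted; the cluster id value is purely illustrative):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.server.SCMStorage;
import org.apache.hadoop.ozone.common.Storage.StorageState;

public final class ScmInitSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    SCMStorage scmStorage = new SCMStorage(conf);
    if (scmStorage.getState() != StorageState.INITIALIZED) {
      // Setting a cluster id is optional; if no SCM id has been recorded,
      // getNodeProperties() supplies a random UUID when initialize()
      // writes the VERSION file.
      scmStorage.setClusterId("CID-example");   // illustrative value only
      scmStorage.initialize();
    }
    System.out.println("sd=" + scmStorage.getStorageDir()
        + "; cid=" + scmStorage.getClusterID()
        + "; scmid=" + scmStorage.getScmId());
  }
}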

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManager.java
deleted file mode 100644
index ce0d4f8..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManager.java
+++ /dev/null
@@ -1,1290 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
-import com.google.protobuf.BlockingService;
-import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.scm.block.BlockManager;
-import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
-import org.apache.hadoop.hdds.scm.container.Mapping;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto
-    .DeleteBlockTransactionResult;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos
-    .ContainerBlocksDeletionACKResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCmdType;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeAddressList;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMReregisterCmdResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SendContainerReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-import org.apache.hadoop.ozone.common.Storage.StorageState;
-import org.apache.hadoop.ozone.common.StorageInfo;
-import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
-import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
-import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.ozone.protocolPB
-    .ScmBlockLocationProtocolServerSideTranslatorPB;
-import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerDatanodeProtocolServerSideTranslatorPB;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerLocationProtocolServerSideTranslatorPB;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.GenericOptionsParser;
-import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CLIENT_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DATANODE_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DB_CACHE_SIZE_MB;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HANDLER_COUNT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HANDLER_COUNT_KEY;
-import static org.apache.hadoop.hdds.protocol.proto
-    .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result;
-import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
-/**
- * StorageContainerManager is the main entry point for the service that provides
- * information about which datanodes host containers.
- *
- * DataNodes report to StorageContainerManager using heartbeat
- * messages. SCM allocates containers and returns a pipeline.
- *
- * A client once it gets a pipeline (a list of datanodes) will connect to the
- * datanodes and create a container, which then can be used to store data.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
-public class StorageContainerManager extends ServiceRuntimeInfoImpl
-    implements StorageContainerDatanodeProtocol,
-    StorageContainerLocationProtocol, ScmBlockLocationProtocol, SCMMXBean {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(StorageContainerManager.class);
-
-  /**
-   *  Startup options.
-   */
-  public enum StartupOption {
-    INIT("-init"),
-    CLUSTERID("-clusterid"),
-    GENCLUSTERID("-genclusterid"),
-    REGULAR("-regular"),
-    HELP("-help");
-
-    private final String name;
-    private String clusterId = null;
-
-    public void setClusterId(String cid) {
-      if(cid != null && !cid.isEmpty()) {
-        clusterId = cid;
-      }
-    }
-
-    public String getClusterId() {
-      return clusterId;
-    }
-
-    StartupOption(String arg) {
-      this.name = arg;
-    }
-
-    public String getName() {
-      return name;
-    }
-  }
-
-  /**
-   * NodeManager and container Managers for SCM.
-   */
-  private final NodeManager scmNodeManager;
-  private final Mapping scmContainerManager;
-  private final BlockManager scmBlockManager;
-  private final SCMStorage scmStorage;
-
-  /** The RPC server that listens to requests from DataNodes. */
-  private final RPC.Server datanodeRpcServer;
-  private final InetSocketAddress datanodeRpcAddress;
-
-  /** The RPC server that listens to requests from clients. */
-  private final RPC.Server clientRpcServer;
-  private final InetSocketAddress clientRpcAddress;
-
-  /** The RPC server that listens to requests from block service clients. */
-  private final RPC.Server blockRpcServer;
-  private final InetSocketAddress blockRpcAddress;
-
-  private final StorageContainerManagerHttpServer httpServer;
-
-  /** SCM mxbean. */
-  private ObjectName scmInfoBeanName;
-
-  /** SCM super user. */
-  private final String scmUsername;
-  private final Collection<String> scmAdminUsernames;
-
-  /** SCM metrics. */
-  private static SCMMetrics metrics;
-  /** Key = DatanodeUuid, value = ContainerStat. */
-  private Cache<String, ContainerStat> containerReportCache;
-
-
-  private static final String USAGE =
-      "Usage: \n ozone scm [genericOptions] "
-          + "[ " + StartupOption.INIT.getName() + " [ "
-          + StartupOption.CLUSTERID.getName() + " <cid> ] ]\n "
-          + "ozone scm [genericOptions] [ "
-          + StartupOption.GENCLUSTERID.getName() + " ]\n " +
-          "ozone scm [ "
-          + StartupOption.HELP.getName() + " ]\n";
-  /**
-   * Creates a new StorageContainerManager.  Configuration will be updated with
-   * information on the actual listening addresses used for RPC servers.
-   *
-   * @param conf configuration
-   */
-  private StorageContainerManager(OzoneConfiguration conf)
-      throws IOException {
-
-    final int handlerCount = conf.getInt(
-        OZONE_SCM_HANDLER_COUNT_KEY, OZONE_SCM_HANDLER_COUNT_DEFAULT);
-    final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
-        OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
-
-    StorageContainerManager.initMetrics();
-    initContainerReportCache(conf);
-
-    scmStorage = new SCMStorage(conf);
-    if (scmStorage.getState() != StorageState.INITIALIZED) {
-      throw new SCMException("SCM not initialized.",
-          ResultCodes.SCM_NOT_INITIALIZED);
-    }
-    scmNodeManager = new SCMNodeManager(conf, scmStorage.getClusterID(), this);
-    scmContainerManager = new ContainerMapping(conf, scmNodeManager, cacheSize);
-    scmBlockManager = new BlockManagerImpl(conf, scmNodeManager,
-        scmContainerManager, cacheSize);
-
-    scmAdminUsernames = conf.getTrimmedStringCollection(
-        OzoneConfigKeys.OZONE_ADMINISTRATORS);
-    scmUsername = UserGroupInformation.getCurrentUser().getUserName();
-    if (!scmAdminUsernames.contains(scmUsername)) {
-      scmAdminUsernames.add(scmUsername);
-    }
-
-    RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
-        ProtobufRpcEngine.class);
-    RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-    RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-
-    BlockingService dnProtoPbService = StorageContainerDatanodeProtocolProtos.
-        StorageContainerDatanodeProtocolService.newReflectiveBlockingService(
-        new StorageContainerDatanodeProtocolServerSideTranslatorPB(this));
-
-    final InetSocketAddress datanodeRpcAddr =
-        HddsServerUtil.getScmDataNodeBindAddress(conf);
-    datanodeRpcServer = startRpcServer(conf, datanodeRpcAddr,
-        StorageContainerDatanodeProtocolPB.class, dnProtoPbService,
-        handlerCount);
-    datanodeRpcAddress = updateRPCListenAddress(conf,
-        OZONE_SCM_DATANODE_ADDRESS_KEY, datanodeRpcAddr, datanodeRpcServer);
-
-    // SCM Container Service RPC
-    BlockingService storageProtoPbService =
-        StorageContainerLocationProtocolProtos
-            .StorageContainerLocationProtocolService
-            .newReflectiveBlockingService(
-            new StorageContainerLocationProtocolServerSideTranslatorPB(this));
-
-    final InetSocketAddress scmAddress =
-        HddsServerUtil.getScmClientBindAddress(conf);
-    clientRpcServer = startRpcServer(conf, scmAddress,
-        StorageContainerLocationProtocolPB.class, storageProtoPbService,
-        handlerCount);
-    clientRpcAddress = updateRPCListenAddress(conf,
-        OZONE_SCM_CLIENT_ADDRESS_KEY, scmAddress, clientRpcServer);
-
-    // SCM Block Service RPC
-    BlockingService blockProtoPbService =
-        ScmBlockLocationProtocolProtos
-            .ScmBlockLocationProtocolService
-            .newReflectiveBlockingService(
-            new ScmBlockLocationProtocolServerSideTranslatorPB(this));
-
-    final InetSocketAddress scmBlockAddress =
-        HddsServerUtil.getScmBlockClientBindAddress(conf);
-    blockRpcServer = startRpcServer(conf, scmBlockAddress,
-        ScmBlockLocationProtocolPB.class, blockProtoPbService,
-        handlerCount);
-    blockRpcAddress = updateRPCListenAddress(conf,
-        OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, scmBlockAddress, blockRpcServer);
-
-    httpServer = new StorageContainerManagerHttpServer(conf);
-
-    registerMXBean();
-  }
-
-  /**
-   * Initialize the cache of container reports sent from datanodes.
-   *
-   * @param conf
-   */
-  private void initContainerReportCache(OzoneConfiguration conf) {
-    containerReportCache = CacheBuilder.newBuilder()
-        .expireAfterAccess(Long.MAX_VALUE, TimeUnit.MILLISECONDS)
-        .maximumSize(Integer.MAX_VALUE)
-        .removalListener(new RemovalListener<String, ContainerStat>() {
-          @Override
-          public void onRemoval(
-              RemovalNotification<String, ContainerStat> removalNotification) {
-            synchronized (containerReportCache) {
-              ContainerStat stat = removalNotification.getValue();
-              // remove invalid container report
-              metrics.decrContainerStat(stat);
-              LOG.debug(
-                  "Remove expired container stat entry for datanode: {}.",
-                  removalNotification.getKey());
-            }
-          }
-        }).build();
-  }
-
-  /**
-   * Builds a message for logging startup information about an RPC server.
-   *
-   * @param description RPC server description
-   * @param addr RPC server listening address
-   * @return server startup message
-   */
-  private static String buildRpcServerStartMessage(String description,
-      InetSocketAddress addr) {
-    return addr != null ? String.format("%s is listening at %s",
-        description, addr.toString()) :
-        String.format("%s not started", description);
-  }
-
-  /**
-   * Starts an RPC server, if configured.
-   *
-   * @param conf configuration
-   * @param addr configured address of RPC server
-   * @param protocol RPC protocol provided by RPC server
-   * @param instance RPC protocol implementation instance
-   * @param handlerCount RPC server handler count
-   *
-   * @return RPC server
-   * @throws IOException if there is an I/O error while creating RPC server
-   */
-  private static RPC.Server startRpcServer(OzoneConfiguration conf,
-      InetSocketAddress addr, Class<?> protocol, BlockingService instance,
-      int handlerCount)
-      throws IOException {
-    RPC.Server rpcServer = new RPC.Builder(conf)
-        .setProtocol(protocol)
-        .setInstance(instance)
-        .setBindAddress(addr.getHostString())
-        .setPort(addr.getPort())
-        .setNumHandlers(handlerCount)
-        .setVerbose(false)
-        .setSecretManager(null)
-        .build();
-
-    DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);
-    return rpcServer;
-  }
-
-  private void registerMXBean() {
-    Map<String, String> jmxProperties = new HashMap<>();
-    jmxProperties.put("component", "ServerRuntime");
-    this.scmInfoBeanName =
-        MBeans.register("StorageContainerManager",
-            "StorageContainerManagerInfo",
-            jmxProperties,
-            this);
-  }
-
-  private void unregisterMXBean() {
-    if(this.scmInfoBeanName != null) {
-      MBeans.unregister(this.scmInfoBeanName);
-      this.scmInfoBeanName = null;
-    }
-  }
-
-  /**
-   * Main entry point for starting StorageContainerManager.
-   *
-   * @param argv arguments
-   * @throws IOException if startup fails due to I/O error
-   */
-  public static void main(String[] argv) throws IOException {
-    if (DFSUtil.parseHelpArgument(argv, USAGE,
-        System.out, true)) {
-      System.exit(0);
-    }
-    try {
-      OzoneConfiguration conf = new OzoneConfiguration();
-      GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
-      if (!hParser.isParseSuccessful()) {
-        System.err.println("USAGE: " + USAGE + "\n");
-        hParser.printGenericCommandUsage(System.err);
-        System.exit(1);
-      }
-      StringUtils.startupShutdownMessage(StorageContainerManager.class,
-          argv, LOG);
-      StorageContainerManager scm = createSCM(hParser.getRemainingArgs(), conf);
-      if (scm != null) {
-        scm.start();
-        scm.join();
-      }
-    } catch (Throwable t) {
-      LOG.error("Failed to start the StorageContainerManager.", t);
-      terminate(1, t);
-    }
-  }
-
-  private static void printUsage(PrintStream out) {
-    out.println(USAGE + "\n");
-  }
-
-  public static StorageContainerManager createSCM(String[] argv,
-      OzoneConfiguration conf) throws IOException {
-    if (!HddsUtils.isHddsEnabled(conf)) {
-      System.err.println("SCM cannot be started in secure mode or when " +
-          OZONE_ENABLED + " is set to false");
-      System.exit(1);
-    }
-    StartupOption startOpt = parseArguments(argv);
-    if (startOpt == null) {
-      printUsage(System.err);
-      terminate(1);
-      return null;
-    }
-    switch (startOpt) {
-    case INIT:
-      terminate(scmInit(conf) ? 0 : 1);
-      return null;
-    case GENCLUSTERID:
-      System.out.println("Generating new cluster id:");
-      System.out.println(StorageInfo.newClusterID());
-      terminate(0);
-      return null;
-    case HELP:
-      printUsage(System.err);
-      terminate(0);
-      return null;
-    default:
-      return new StorageContainerManager(conf);
-    }
-  }
-
-  /**
-   * Routine to set up the Version info for StorageContainerManager.
-   *
-   * @param conf OzoneConfiguration
-   * @return true if SCM initialization is successful, false otherwise.
-   * @throws IOException if init fails due to I/O error
-   */
-  public static boolean scmInit(OzoneConfiguration conf) throws IOException {
-    SCMStorage scmStorage = new SCMStorage(conf);
-    StorageState state = scmStorage.getState();
-    if (state != StorageState.INITIALIZED) {
-      try {
-        String clusterId = StartupOption.INIT.getClusterId();
-        if (clusterId != null && !clusterId.isEmpty()) {
-          scmStorage.setClusterId(clusterId);
-        }
-        scmStorage.initialize();
-        System.out.println("SCM initialization succeeded." +
-            "Current cluster id for sd=" + scmStorage.getStorageDir() + ";cid="
-                + scmStorage.getClusterID());
-        return true;
-      } catch (IOException ioe) {
-        LOG.error("Could not initialize SCM version file", ioe);
-        return false;
-      }
-    } else {
-      System.out.println("SCM already initialized. Reusing existing" +
-          " cluster id for sd=" + scmStorage.getStorageDir() + ";cid="
-              + scmStorage.getClusterID());
-      return true;
-    }
-  }
-
-  private static StartupOption parseArguments(String[] args) {
-    int argsLen = (args == null) ? 0 : args.length;
-    StartupOption startOpt = StartupOption.HELP;
-    if (argsLen == 0) {
-      startOpt = StartupOption.REGULAR;
-    }
-    for (int i = 0; i < argsLen; i++) {
-      String cmd = args[i];
-      if (StartupOption.INIT.getName().equalsIgnoreCase(cmd)) {
-        startOpt = StartupOption.INIT;
-        if (argsLen > 3) {
-          return null;
-        }
-        for (i = i + 1; i < argsLen; i++) {
-          if (args[i].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
-            i++;
-            if (i < argsLen && !args[i].isEmpty()) {
-              startOpt.setClusterId(args[i]);
-            } else {
-              // if no cluster id specified or is empty string, return null
-              LOG.error("Must specify a valid cluster ID after the "
-                  + StartupOption.CLUSTERID.getName() + " flag");
-              return null;
-            }
-          } else {
-            return null;
-          }
-        }
-      } else if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) {
-        if (argsLen > 1) {
-          return null;
-        }
-        startOpt = StartupOption.GENCLUSTERID;
-      }
-    }
-    return startOpt;
-  }
-
-  /**
-   * Returns a SCMCommandResponseProto built from the given SCM command.
-   * @param cmd - Cmd
-   * @return SCMCommandResponseProto
-   * @throws InvalidProtocolBufferException
-   */
-  @VisibleForTesting
-  public SCMCommandResponseProto getCommandResponse(SCMCommand cmd,
-      final String datanodID)
-      throws IOException {
-    SCMCmdType type = cmd.getType();
-    SCMCommandResponseProto.Builder builder =
-        SCMCommandResponseProto.newBuilder()
-        .setDatanodeUUID(datanodID);
-    switch (type) {
-    case registeredCommand:
-      return builder.setCmdType(SCMCmdType.registeredCommand)
-          .setRegisteredProto(
-              SCMRegisteredCmdResponseProto.getDefaultInstance())
-          .build();
-    case versionCommand:
-      return builder.setCmdType(SCMCmdType.versionCommand)
-          .setVersionProto(SCMVersionResponseProto.getDefaultInstance())
-          .build();
-    case sendContainerReport:
-      return builder.setCmdType(SCMCmdType.sendContainerReport)
-          .setSendReport(SendContainerReportProto.getDefaultInstance())
-          .build();
-    case reregisterCommand:
-      return builder.setCmdType(SCMCmdType.reregisterCommand)
-          .setReregisterProto(SCMReregisterCmdResponseProto
-              .getDefaultInstance())
-          .build();
-    case deleteBlocksCommand:
-      // Once SCM sends out the deletion message, increment the count.
-      // This is done here instead of when SCM receives the ACK, because
-      // the DN might not be able to respond with the ACK for some time.
-      // If it times out, SCM needs to re-send the message several times.
-      List<Long> txs = ((DeleteBlocksCommand) cmd).blocksTobeDeleted()
-          .stream().map(tx -> tx.getTxID()).collect(Collectors.toList());
-      this.getScmBlockManager().getDeletedBlockLog().incrementCount(txs);
-      return builder.setCmdType(SCMCmdType.deleteBlocksCommand)
-          .setDeleteBlocksProto(((DeleteBlocksCommand) cmd).getProto())
-          .build();
-    case closeContainerCommand:
-      return builder.setCmdType(SCMCmdType.closeContainerCommand)
-          .setCloseContainerProto(((CloseContainerCommand)cmd).getProto())
-          .build();
-    default:
-      throw new IllegalArgumentException("Not implemented");
-    }
-  }
-
-  @VisibleForTesting
-  public static SCMRegisteredCmdResponseProto getRegisteredResponse(
-      SCMCommand cmd, SCMNodeAddressList addressList) {
-    Preconditions.checkState(cmd.getClass() == RegisteredCommand.class);
-    RegisteredCommand rCmd = (RegisteredCommand) cmd;
-    SCMCmdType type = cmd.getType();
-    if (type != SCMCmdType.registeredCommand) {
-      throw new IllegalArgumentException("Registered command is not well " +
-          "formed. Internal Error.");
-    }
-    return SCMRegisteredCmdResponseProto.newBuilder()
-        //TODO : Fix this later when we have multiple SCM support.
-        //.setAddressList(addressList)
-        .setErrorCode(rCmd.getError())
-        .setClusterID(rCmd.getClusterID())
-        .setDatanodeUUID(rCmd.getDatanodeUUID()).build();
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public Pipeline getContainer(String containerName) throws IOException {
-    checkAdminAccess();
-    return scmContainerManager.getContainer(containerName).getPipeline();
-  }
-
-  @VisibleForTesting
-  public ContainerInfo getContainerInfo(String containerName)
-      throws IOException {
-    return scmContainerManager.getContainer(containerName);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<ContainerInfo> listContainer(String startName,
-      String prefixName, int count) throws IOException {
-    return scmContainerManager.listContainer(startName, prefixName, count);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public void deleteContainer(String containerName) throws IOException {
-    checkAdminAccess();
-    scmContainerManager.deleteContainer(containerName);
-  }
-
-  /**
-   * Queries a list of Node Statuses.
-   *
-   * @param nodeStatuses
-   * @param queryScope
-   * @param poolName @return List of Datanodes.
-   */
-  @Override
-  public HddsProtos.NodePool queryNode(EnumSet<NodeState> nodeStatuses,
-      HddsProtos.QueryScope queryScope, String poolName) throws IOException {
-
-    if (queryScope == HddsProtos.QueryScope.POOL) {
-      throw new IllegalArgumentException("Not Supported yet");
-    }
-
-    List<DatanodeDetails> datanodes = queryNode(nodeStatuses);
-    HddsProtos.NodePool.Builder poolBuilder =
-        HddsProtos.NodePool.newBuilder();
-
-    for (DatanodeDetails datanode : datanodes) {
-      HddsProtos.Node node = HddsProtos.Node.newBuilder()
-          .setNodeID(datanode.getProtoBufMessage())
-          .addAllNodeStates(nodeStatuses)
-          .build();
-      poolBuilder.addNodes(node);
-    }
-
-    return poolBuilder.build();
-  }
-
-  /**
-   * Notify from client when begin/finish operation for container/pipeline
-   * objects on datanodes.
-   * @param type
-   * @param name
-   * @param op
-   * @param stage
-   */
-  @Override
-  public void notifyObjectStageChange(
-      ObjectStageChangeRequestProto.Type type, String name,
-      ObjectStageChangeRequestProto.Op op,
-      ObjectStageChangeRequestProto.Stage stage) throws IOException {
-
-    LOG.info("Object type {} name {} op {} new stage {}",
-        type, name, op, stage);
-    if (type == ObjectStageChangeRequestProto.Type.container) {
-      if (op == ObjectStageChangeRequestProto.Op.create) {
-        if (stage == ObjectStageChangeRequestProto.Stage.begin) {
-          scmContainerManager.updateContainerState(name,
-              HddsProtos.LifeCycleEvent.CREATE);
-        } else {
-          scmContainerManager.updateContainerState(name,
-              HddsProtos.LifeCycleEvent.CREATED);
-        }
-      } else if (op == ObjectStageChangeRequestProto.Op.close) {
-        if (stage == ObjectStageChangeRequestProto.Stage.begin) {
-          scmContainerManager.updateContainerState(name,
-              HddsProtos.LifeCycleEvent.FINALIZE);
-        } else {
-          scmContainerManager.updateContainerState(name,
-              HddsProtos.LifeCycleEvent.CLOSE);
-        }
-      }
-    } //else if (type == ObjectStageChangeRequestProto.Type.pipeline) {
-    // TODO: pipeline state update will be addressed in future patch.
-    //}
-  }
-
-  /**
-   * Creates a replication pipeline of a specified type.
-   */
-  @Override
-  public Pipeline createReplicationPipeline(
-      HddsProtos.ReplicationType replicationType,
-      HddsProtos.ReplicationFactor factor,
-      HddsProtos.NodePool nodePool)
-      throws IOException {
-     // TODO: will be addressed in future patch.
-    return null;
-  }
-
-  /**
-   * Queries a list of Node that match a set of statuses.
-   * <p>
- * For example, if nodeStatuses contains HEALTHY and RAFT_MEMBER, then
- * this call returns all healthy nodes that are members of a Raft
- * pipeline.
- * <p>
- * Right now we don't support other boolean operators, so the requested
- * states are combined with AND.
-   *
-   * @param nodeStatuses - A set of NodeStates.
-   * @return List of Datanodes.
-   */
-
-  public List<DatanodeDetails> queryNode(EnumSet<NodeState> nodeStatuses) {
-    Preconditions.checkNotNull(nodeStatuses, "Node Query set cannot be null");
-    Preconditions.checkState(nodeStatuses.size() > 0, "No valid arguments " +
-        "in the query set");
-    List<DatanodeDetails> resultList = new LinkedList<>();
-    Set<DatanodeDetails> currentSet = new TreeSet<>();
-
-    for (NodeState nodeState : nodeStatuses) {
-      Set<DatanodeDetails> nextSet = queryNodeState(nodeState);
-      if ((nextSet == null) || (nextSet.size() == 0)) {
-        // Right now we only support the AND operation, so intersecting
-        // with an empty set yields an empty result.
-        return resultList;
-      }
-      // First time we have to add all the elements, next time we have to
-      // do an intersection operation on the set.
-      if (currentSet.size() == 0) {
-        currentSet.addAll(nextSet);
-      } else {
-        currentSet.retainAll(nextSet);
-      }
-    }
-
-    resultList.addAll(currentSet);
-    return resultList;
-  }
-
-  /**
-   * Query the System for Nodes.
-   *
-   * @param nodeState - NodeState that we are interested in matching.
-   * @return Set of Datanodes that match the NodeState.
-   */
-  private Set<DatanodeDetails> queryNodeState(NodeState nodeState) {
-    if (nodeState == NodeState.RAFT_MEMBER ||
-        nodeState == NodeState.FREE_NODE) {
-      throw new IllegalStateException("Not implemented yet");
-    }
-    Set<DatanodeDetails> returnSet = new TreeSet<>();
-    List<DatanodeDetails> tmp = getScmNodeManager().getNodes(nodeState);
-    if ((tmp != null) && (tmp.size() > 0)) {
-      returnSet.addAll(tmp);
-    }
-    return returnSet;
-  }
-
-  /**
-   * Asks SCM where a container should be allocated. SCM responds with the set
-   * of datanodes that should be used for creating this container.
-   *
-   * @param containerName - Name of the container.
-   * @param replicationFactor - replication factor.
-   * @return pipeline
-   * @throws IOException
-   */
-  @Override
-  public Pipeline allocateContainer(HddsProtos.ReplicationType replicationType,
-      HddsProtos.ReplicationFactor replicationFactor, String containerName,
-      String owner) throws IOException {
-
-    checkAdminAccess();
-    return scmContainerManager
-        .allocateContainer(replicationType, replicationFactor, containerName,
-            owner).getPipeline();
-  }
-
-  /**
-   * Returns listening address of StorageLocation Protocol RPC server.
-   *
-   * @return listen address of StorageLocation RPC server
-   */
-  @VisibleForTesting
-  public InetSocketAddress getClientRpcAddress() {
-    return clientRpcAddress;
-  }
-
-  @Override
-  public String getClientRpcPort() {
-    InetSocketAddress addr = getClientRpcAddress();
-    return addr == null ? "0" : Integer.toString(addr.getPort());
-  }
-
-  /**
-   * Returns listening address of StorageDatanode Protocol RPC server.
-   *
-   * @return Address where datanode are communicating.
-   */
-  public InetSocketAddress getDatanodeRpcAddress() {
-    return datanodeRpcAddress;
-  }
-
-  @Override
-  public String getDatanodeRpcPort() {
-    InetSocketAddress addr = getDatanodeRpcAddress();
-    return addr == null ? "0" : Integer.toString(addr.getPort());
-  }
-
-  /**
-   * Start service.
-   */
-  public void start() throws IOException {
-    LOG.info(buildRpcServerStartMessage(
-        "StorageContainerLocationProtocol RPC server", clientRpcAddress));
-    DefaultMetricsSystem.initialize("StorageContainerManager");
-    clientRpcServer.start();
-    LOG.info(buildRpcServerStartMessage(
-        "ScmBlockLocationProtocol RPC server", blockRpcAddress));
-    blockRpcServer.start();
-    LOG.info(buildRpcServerStartMessage("RPC server for DataNodes",
-        datanodeRpcAddress));
-    datanodeRpcServer.start();
-    httpServer.start();
-    scmBlockManager.start();
-
-    setStartTime();
-
-  }
-
-  /**
-   * Stop service.
-   */
-  public void stop() {
-    try {
-      LOG.info("Stopping block service RPC server");
-      blockRpcServer.stop();
-    } catch (Exception ex) {
-      LOG.error("Storage Container Manager blockRpcServer stop failed.", ex);
-    }
-
-    try {
-      LOG.info("Stopping the StorageContainerLocationProtocol RPC server");
-      clientRpcServer.stop();
-    } catch (Exception ex) {
-      LOG.error("Storage Container Manager clientRpcServer stop failed.", ex);
-    }
-
-    try {
-      LOG.info("Stopping the RPC server for DataNodes");
-      datanodeRpcServer.stop();
-    } catch (Exception ex) {
-      LOG.error("Storage Container Manager datanodeRpcServer stop failed.", ex);
-    }
-
-    try {
-      LOG.info("Stopping Storage Container Manager HTTP server.");
-      httpServer.stop();
-    } catch (Exception ex) {
-      LOG.error("Storage Container Manager HTTP server stop failed.", ex);
-    }
-
-    try {
-      LOG.info("Stopping Block Manager Service.");
-      scmBlockManager.stop();
-    } catch (Exception ex) {
-      LOG.error("SCM block manager service stop failed.", ex);
-    }
-
-    if (containerReportCache != null) {
-      containerReportCache.invalidateAll();
-      containerReportCache.cleanUp();
-    }
-
-    if (metrics != null) {
-      metrics.unRegister();
-    }
-
-    unregisterMXBean();
-    IOUtils.cleanupWithLogger(LOG, scmContainerManager);
-    IOUtils.cleanupWithLogger(LOG, scmNodeManager);
-  }
-
-  /**
-   * Wait until service has completed shutdown.
-   */
-  public void join() {
-    try {
-      blockRpcServer.join();
-      clientRpcServer.join();
-      datanodeRpcServer.join();
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      LOG.info("Interrupted during StorageContainerManager join.");
-    }
-  }
-
-  /**
-   * Returns SCM version.
-   *
-   * @return Version info.
-   */
-  @Override
-  public SCMVersionResponseProto getVersion(
-      SCMVersionRequestProto versionRequest) throws IOException {
-    return getScmNodeManager().getVersion(versionRequest).getProtobufMessage();
-  }
-
-  /**
-   * Used by data node to send a Heartbeat.
-   *
-   * @param datanodeDetails - Datanode Details.
-   * @param nodeReport - Node Report
-   * @param reportState - Container report ready info.
-   * @return - SCMHeartbeatResponseProto
-   * @throws IOException
-   */
-  @Override
-  public SCMHeartbeatResponseProto sendHeartbeat(
-      DatanodeDetailsProto datanodeDetails, SCMNodeReport nodeReport,
-      ReportState reportState) throws IOException {
-    List<SCMCommand> commands =
-        getScmNodeManager().sendHeartbeat(datanodeDetails, nodeReport,
-            reportState);
-    List<SCMCommandResponseProto> cmdResponses = new LinkedList<>();
-    for (SCMCommand cmd : commands) {
-      cmdResponses.add(getCommandResponse(cmd, datanodeDetails.getUuid()
-          .toString()));
-    }
-    return SCMHeartbeatResponseProto.newBuilder().addAllCommands(cmdResponses)
-        .build();
-  }
-
-  /**
-   * Register Datanode.
-   *
-   * @param datanodeDetails - Datanode details.
-   * @param scmAddresses - List of SCMs this datanode is configured to
-   * communicate with.
-   * @return SCM Command.
-   */
-  @Override
-  public StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto
-      register(DatanodeDetailsProto datanodeDetails, String[] scmAddresses) {
-    // TODO : Return the list of Nodes that forms the SCM HA.
-    return getRegisteredResponse(
-        scmNodeManager.register(datanodeDetails), null);
-  }
-
-  /**
-   * Send a container report.
-   *
-   * @param reports -- Container report
-   * @return an empty ContainerReportsResponseProto acknowledging the report.
-   * @throws IOException
-   */
-  @Override
-  public ContainerReportsResponseProto sendContainerReport(
-      ContainerReportsRequestProto reports) throws IOException {
-    updateContainerReportMetrics(reports);
-
-    // should we process container reports async?
-    scmContainerManager.processContainerReports(reports);
-    return ContainerReportsResponseProto.newBuilder().build();
-  }
-
-  private void updateContainerReportMetrics(
-      ContainerReportsRequestProto reports) {
-    ContainerStat newStat = null;
-    // TODO: We should update the logic once incremental container report
-    // type is supported.
-    if (reports
-        .getType() == ContainerReportsRequestProto.reportType.fullReport) {
-      newStat = new ContainerStat();
-      for (StorageContainerDatanodeProtocolProtos.ContainerInfo info : reports
-          .getReportsList()) {
-        newStat.add(new ContainerStat(info.getSize(), info.getUsed(),
-            info.getKeyCount(), info.getReadBytes(), info.getWriteBytes(),
-            info.getReadCount(), info.getWriteCount()));
-      }
-
-      // update container metrics
-      metrics.setLastContainerStat(newStat);
-    }
-
-    // Update container stat entry, this will trigger a removal operation if it
-    // exists in cache.
-    synchronized (containerReportCache) {
-      String datanodeUuid = reports.getDatanodeDetails().getUuid();
-      if (datanodeUuid != null && newStat != null) {
-        containerReportCache.put(datanodeUuid, newStat);
-        // update global view container metrics
-        metrics.incrContainerStat(newStat);
-      }
-    }
-  }
-
-  /**
-   * Handles the block deletion ACKs sent by datanodes. Once ACKs are received,
-   * SCM considers the blocks deleted and updates the metadata in the SCM DB.
-   *
-   * @param acks
-   * @return
-   * @throws IOException
-   */
-  @Override
-  public ContainerBlocksDeletionACKResponseProto sendContainerBlocksDeletionACK(
-      ContainerBlocksDeletionACKProto acks) throws IOException {
-    if (acks.getResultsCount() > 0) {
-      List<DeleteBlockTransactionResult> resultList = acks.getResultsList();
-      for (DeleteBlockTransactionResult result : resultList) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Got block deletion ACK from datanode, TXIDs={}, "
-                  + "success={}", result.getTxID(), result.getSuccess());
-        }
-        if (result.getSuccess()) {
-          LOG.debug("Purging TXID={} from block deletion log",
-              result.getTxID());
-          this.getScmBlockManager().getDeletedBlockLog()
-              .commitTransactions(Collections.singletonList(result.getTxID()));
-        } else {
-          LOG.warn("Got failed ACK for TXID={}, prepare to resend the "
-              + "TX in next interval", result.getTxID());
-        }
-      }
-    }
-    return ContainerBlocksDeletionACKResponseProto.newBuilder()
-        .getDefaultInstanceForType();
-  }
-
-  /**
-   * Returns the Number of Datanodes that are communicating with SCM.
-   *
-   * @param nodestate Healthy, Dead etc.
-   * @return int -- count
-   */
-  public int getNodeCount(NodeState nodestate) {
-    return scmNodeManager.getNodeCount(nodestate);
-  }
-
-  /**
-   * Returns SCM container manager.
-   */
-  @VisibleForTesting
-  public Mapping getScmContainerManager() {
-    return scmContainerManager;
-  }
-
-  /**
-   * Returns node manager.
-   * @return - Node Manager
-   */
-  @VisibleForTesting
-  public NodeManager getScmNodeManager() {
-    return scmNodeManager;
-  }
-
-  @VisibleForTesting
-  public BlockManager getScmBlockManager() {
-    return scmBlockManager;
-  }
-
-  /**
-   * Get block locations.
-   * @param keys batch of block keys to retrieve.
-   * @return set of allocated blocks.
-   * @throws IOException
-   */
-  @Override
-  public Set<AllocatedBlock> getBlockLocations(final Set<String> keys)
-      throws IOException {
-    Set<AllocatedBlock> locatedBlocks = new HashSet<>();
-    for (String key: keys) {
-      Pipeline pipeline = scmBlockManager.getBlock(key);
-      AllocatedBlock block = new AllocatedBlock.Builder()
-          .setKey(key)
-          .setPipeline(pipeline).build();
-      locatedBlocks.add(block);
-    }
-    return locatedBlocks;
-  }
-
-  /**
-   * Asks SCM where a block should be allocated. SCM responds with the set of
-   * datanodes that should be used for creating this block.
-   *
-   * @param size - size of the block.
-   * @param type - Replication type.
-   * @param factor
-   * @return allocated block accessing info (key, pipeline).
-   * @throws IOException
-   */
-  @Override
-  public AllocatedBlock allocateBlock(long size,
-      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
-      String owner) throws IOException {
-    return scmBlockManager.allocateBlock(size, type, factor, owner);
-  }
-
-  /**
-   * Get the clusterId and SCM Id from the version file in SCM.
-   */
-  @Override
-  public ScmInfo getScmInfo() throws IOException {
-    ScmInfo.Builder builder = new ScmInfo.Builder()
-        .setClusterId(scmStorage.getClusterID())
-        .setScmId(scmStorage.getScmId());
-    return builder.build();
-  }
-  /**
-   * Delete blocks for a set of object keys.
-   *
-   * @param keyBlocksInfoList list of block keys with object keys to delete.
-   * @return deletion results.
-   */
-  public List<DeleteBlockGroupResult> deleteKeyBlocks(
-      List<BlockGroup> keyBlocksInfoList) throws IOException {
-    LOG.info("SCM is informed by KSM to delete {} blocks",
-        keyBlocksInfoList.size());
-    List<DeleteBlockGroupResult> results = new ArrayList<>();
-    for (BlockGroup keyBlocks : keyBlocksInfoList) {
-      Result resultCode;
-      try {
-        // We delete blocks in an atomic operation to prevent getting into a
-        // state where only some of the blocks are deleted, which would leave
-        // the key in an inconsistent state.
-        scmBlockManager.deleteBlocks(keyBlocks.getBlockIDList());
-        resultCode = Result.success;
-      } catch (SCMException scmEx) {
-        LOG.warn("Fail to delete block: {}", keyBlocks.getGroupID(), scmEx);
-        switch (scmEx.getResult()) {
-        case CHILL_MODE_EXCEPTION:
-          resultCode = Result.chillMode;
-          break;
-        case FAILED_TO_FIND_BLOCK:
-          resultCode = Result.errorNotFound;
-          break;
-        default:
-          resultCode = Result.unknownFailure;
-        }
-      } catch (IOException ex) {
-        LOG.warn("Fail to delete blocks for object key: {}",
-            keyBlocks.getGroupID(), ex);
-        resultCode = Result.unknownFailure;
-      }
-      List<DeleteBlockResult> blockResultList = new ArrayList<>();
-      for (String blockKey : keyBlocks.getBlockIDList()) {
-        blockResultList.add(new DeleteBlockResult(blockKey, resultCode));
-      }
-      results.add(new DeleteBlockGroupResult(keyBlocks.getGroupID(),
-          blockResultList));
-    }
-    return results;
-  }
-
-  @VisibleForTesting
-  public String getPpcRemoteUsername() {
-    UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser();
-    return user == null ? null : user.getUserName();
-  }
-
-  private void checkAdminAccess() throws IOException {
-    String remoteUser = getPpcRemoteUsername();
-    if(remoteUser != null) {
-      if (!scmAdminUsernames.contains(remoteUser)) {
-        throw new IOException(
-            "Access denied for user " + remoteUser
-                + ". Superuser privilege is required.");
-      }
-    }
-  }
-
-  /**
-   * Initialize SCM metrics.
-   */
-  public static void initMetrics() {
-    metrics = SCMMetrics.create();
-  }
-
-  /**
-   * Return SCM metrics instance.
-   */
-  public static SCMMetrics getMetrics() {
-    return metrics == null ? SCMMetrics.create() : metrics;
-  }
-
-  /**
-   * Invalidate container stat entry for given datanode.
-   *
-   * @param datanodeUuid
-   */
-  public void removeContainerReport(String datanodeUuid) {
-    synchronized (containerReportCache) {
-      containerReportCache.invalidate(datanodeUuid);
-    }
-  }
-
-  /**
-   * Get container stat of specified datanode.
-   *
-   * @param datanodeUuid
-   * @return
-   */
-  public ContainerStat getContainerReport(String datanodeUuid) {
-    ContainerStat stat = null;
-    synchronized (containerReportCache) {
-      stat = containerReportCache.getIfPresent(datanodeUuid);
-    }
-
-    return stat;
-  }
-
-  /**
-   * Returns a view of the container stat entries. Modifications made to the
-   * map will directly affect the cache.
-   *
-   * @return
-   */
-  public ConcurrentMap<String, ContainerStat> getContainerReportCache() {
-    return containerReportCache.asMap();
-  }
-
-  @Override
-  public Map<String, String> getContainerReport() {
-    Map<String, String> id2StatMap = new HashMap<>();
-    synchronized (containerReportCache) {
-      ConcurrentMap<String, ContainerStat> map = containerReportCache.asMap();
-      for (Map.Entry<String, ContainerStat> entry : map.entrySet()) {
-        id2StatMap.put(entry.getKey(), entry.getValue().toJsonString());
-      }
-    }
-
-    return id2StatMap;
-  }
-}
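
The node-query semantics documented in queryNode above — multiple NodeStates combined with AND, with an empty match for any requested state short-circuiting the whole query — can be restated in isolation. The standalone sketch below uses hypothetical names and is independent of the SCM types; it only mirrors the intersection logic:

import java.util.EnumSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import java.util.function.Function;

public final class NodeQuerySketch {
  enum NodeState { HEALTHY, STALE, DEAD }

  // Result = intersection of the per-state node sets; any empty set makes
  // the whole result empty, as in StorageContainerManager#queryNode.
  static <T extends Comparable<T>> List<T> queryNodes(
      EnumSet<NodeState> states, Function<NodeState, Set<T>> nodesByState) {
    List<T> result = new LinkedList<>();
    Set<T> current = new TreeSet<>();
    for (NodeState state : states) {
      Set<T> next = nodesByState.apply(state);
      if (next == null || next.isEmpty()) {
        return result;                 // AND with an empty set is empty
      }
      if (current.isEmpty()) {
        current.addAll(next);          // first state seeds the working set
      } else {
        current.retainAll(next);       // later states intersect into it
      }
    }
    result.addAll(current);
    return result;
  }
}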

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManagerHttpServer.java
deleted file mode 100644
index 1ca059c..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManagerHttpServer.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.server.BaseHttpServer;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-
-import java.io.IOException;
-
-/**
- * HttpServer2 wrapper for the Ozone Storage Container Manager.
- */
-public class StorageContainerManagerHttpServer extends BaseHttpServer {
-
-  public StorageContainerManagerHttpServer(Configuration conf)
-      throws IOException {
-    super(conf, "scm");
-  }
-
-  @Override protected String getHttpAddressKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY;
-  }
-
-  @Override protected String getHttpBindHostKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_KEY;
-  }
-
-  @Override protected String getHttpsAddressKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY;
-  }
-
-  @Override protected String getHttpsBindHostKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTPS_BIND_HOST_KEY;
-  }
-
-  @Override protected String getBindHostDefault() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_DEFAULT;
-  }
-
-  @Override protected int getHttpBindPortDefault() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_BIND_PORT_DEFAULT;
-  }
-
-  @Override protected int getHttpsBindPortDefault() {
-    return ScmConfigKeys.OZONE_SCM_HTTPS_BIND_PORT_DEFAULT;
-  }
-
-  @Override protected String getKeytabFile() {
-    return ScmConfigKeys.OZONE_SCM_KEYTAB_FILE;
-  }
-
-  @Override protected String getSpnegoPrincipal() {
-    return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
-  }
-
-  @Override protected String getEnabledKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_ENABLED_KEY;
-  }
-
-}
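
StorageContainerManagerHttpServer only maps SCM-specific configuration keys onto the shared BaseHttpServer. As a rough illustration of how such a key set is typically resolved into a bind address — this is not the BaseHttpServer implementation, and the helper below is hypothetical — the address key supplies host:port, an optional bind-host key overrides the host, and the default port applies when nothing is configured:

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;

public final class HttpBindAddressSketch {
  static InetSocketAddress resolve(Configuration conf, String addressKey,
      String bindHostKey, String defaultHost, int defaultPort) {
    // host:port from the address key, falling back to the defaults.
    InetSocketAddress addr = NetUtils.createSocketAddr(
        conf.getTrimmed(addressKey, defaultHost + ":" + defaultPort));
    // An explicit bind-host key, if present, overrides only the host part.
    String bindHost = conf.getTrimmed(bindHostKey);
    if (bindHost != null && !bindHost.isEmpty()) {
      addr = new InetSocketAddress(bindHost, addr.getPort());
    }
    return addr;
  }
}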

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 6857b11..c72e2a1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -21,7 +21,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
-import org.apache.hadoop.hdds.scm.StorageContainerManager;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.VersionInfo;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
@@ -849,10 +849,11 @@ public class SCMNodeManager
               .setNodeReport(nodeReport)
               .setContainerReportState(containerReportState)
               .build());
+      return commandQueue.getCommand(datanodeDetails.getUuid());
     } else {
       LOG.error("Datanode ID in heartbeat is null");
     }
-    return commandQueue.getCommand(datanodeDetails.getUuid());
+    return null;
   }
 
   /**
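
The SCMNodeManager hunk above narrows sendHeartbeat so that queued commands are returned only when the heartbeat carries a datanode UUID; a heartbeat without one now logs an error and yields null rather than still querying the command queue. A toy model of that guard, with hypothetical names unrelated to the real SCMCommand queue:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

public final class HeartbeatCommandSketch {
  // Pending commands keyed by datanode UUID (toy stand-in for CommandQueue).
  private final Map<UUID, List<String>> commandQueue = new HashMap<>();

  void queueCommand(UUID datanodeUuid, String command) {
    commandQueue.computeIfAbsent(datanodeUuid, k -> new ArrayList<>())
        .add(command);
  }

  List<String> onHeartbeat(UUID datanodeUuid) {
    if (datanodeUuid == null) {
      System.err.println("Datanode ID in heartbeat is null");
      return null;                 // no commands for an anonymous heartbeat
    }
    List<String> pending = commandQueue.remove(datanodeUuid);
    return pending != null ? pending : new ArrayList<>();
  }
}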

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
new file mode 100644
index 0000000..e0560a1
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
@@ -0,0 +1,222 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
+ * information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache
+ * License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a
+ * copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.server;
+
+import com.google.protobuf.BlockingService;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
+import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
+import org.apache.hadoop.ozone.protocolPB
+    .ScmBlockLocationProtocolServerSideTranslatorPB;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_HANDLER_COUNT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_HANDLER_COUNT_KEY;
+import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
+import static org.apache.hadoop.hdds.scm.server.StorageContainerManager
+    .startRpcServer;
+
+/**
+ * SCM block protocol is the protocol used by Namenode and OzoneManager to get
+ * blocks from the SCM.
+ */
+public class SCMBlockProtocolServer implements ScmBlockLocationProtocol {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMBlockProtocolServer.class);
+
+  private final StorageContainerManager scm;
+  private final OzoneConfiguration conf;
+  private final RPC.Server blockRpcServer;
+  private final InetSocketAddress blockRpcAddress;
+
+  /**
+   * The RPC server that listens to requests from block service clients.
+   */
+  public SCMBlockProtocolServer(OzoneConfiguration conf,
+      StorageContainerManager scm) throws IOException {
+    this.scm = scm;
+    this.conf = conf;
+    final int handlerCount =
+        conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY,
+            OZONE_SCM_HANDLER_COUNT_DEFAULT);
+
+    RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
+        ProtobufRpcEngine.class);
+    // SCM Block Service RPC
+    BlockingService blockProtoPbService =
+        ScmBlockLocationProtocolProtos.ScmBlockLocationProtocolService
+            .newReflectiveBlockingService(
+                new ScmBlockLocationProtocolServerSideTranslatorPB(this));
+
+    final InetSocketAddress scmBlockAddress = HddsServerUtil
+        .getScmBlockClientBindAddress(conf);
+    blockRpcServer =
+        startRpcServer(
+            conf,
+            scmBlockAddress,
+            ScmBlockLocationProtocolPB.class,
+            blockProtoPbService,
+            handlerCount);
+    blockRpcAddress =
+        updateRPCListenAddress(
+            conf, OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, scmBlockAddress,
+            blockRpcServer);
+
+  }
+
+  public RPC.Server getBlockRpcServer() {
+    return blockRpcServer;
+  }
+
+  public InetSocketAddress getBlockRpcAddress() {
+    return blockRpcAddress;
+  }
+
+  public void start() {
+    LOG.info(
+        StorageContainerManager.buildRpcServerStartMessage(
+            "RPC server for Block Protocol", getBlockRpcAddress()));
+    getBlockRpcServer().start();
+  }
+
+  public void stop() {
+    try {
+      LOG.info("Stopping the RPC server for Block Protocol");
+      getBlockRpcServer().stop();
+    } catch (Exception ex) {
+      LOG.error("Block Protocol RPC stop failed.", ex);
+    }
+    IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager());
+  }
+
+  public void join() throws InterruptedException {
+    LOG.trace("Join RPC server for Block Protocol");
+    getBlockRpcServer().join();
+  }
+
+  @Override
+  public Set<AllocatedBlock> getBlockLocations(Set<String> keys) throws
+      IOException {
+    Set<AllocatedBlock> locatedBlocks = new HashSet<>();
+    for (String key : keys) {
+      Pipeline pipeline = scm.getScmBlockManager().getBlock(key);
+      AllocatedBlock block = new AllocatedBlock.Builder().setKey(key)
+          .setPipeline(pipeline).build();
+      locatedBlocks.add(block);
+    }
+    return locatedBlocks;
+
+  }
+
+  @Override
+  public AllocatedBlock allocateBlock(long size, HddsProtos.ReplicationType
+      type, HddsProtos.ReplicationFactor factor, String owner) throws
+      IOException {
+    return scm.getScmBlockManager().allocateBlock(size, type, factor, owner);
+  }
+
+  /**
+   * Delete blocks for a set of object keys.
+   *
+   * @param keyBlocksInfoList list of block keys with object keys to delete.
+   * @return deletion results.
+   */
+  @Override
+  public List<DeleteBlockGroupResult> deleteKeyBlocks(
+      List<BlockGroup> keyBlocksInfoList) throws IOException {
+    LOG.info("SCM is informed by KSM to delete {} blocks", keyBlocksInfoList
+        .size());
+    List<DeleteBlockGroupResult> results = new ArrayList<>();
+    for (BlockGroup keyBlocks : keyBlocksInfoList) {
+      ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result resultCode;
+      try {
+        // We delete blocks in an atomic operation to prevent getting
+        // into a state where only some of the blocks are deleted,
+        // which would leave the key in an inconsistent state.
+        scm.getScmBlockManager().deleteBlocks(keyBlocks.getBlockIDList());
+        resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult
+            .Result.success;
+      } catch (SCMException scmEx) {
+        LOG.warn("Fail to delete block: {}", keyBlocks.getGroupID(), scmEx);
+        switch (scmEx.getResult()) {
+        case CHILL_MODE_EXCEPTION:
+          resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult
+              .Result.chillMode;
+          break;
+        case FAILED_TO_FIND_BLOCK:
+          resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult
+              .Result.errorNotFound;
+          break;
+        default:
+          resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult
+              .Result.unknownFailure;
+        }
+      } catch (IOException ex) {
+        LOG.warn("Fail to delete blocks for object key: {}", keyBlocks
+            .getGroupID(), ex);
+        resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult
+            .Result.unknownFailure;
+      }
+      List<DeleteBlockResult> blockResultList = new ArrayList<>();
+      for (String blockKey : keyBlocks.getBlockIDList()) {
+        blockResultList.add(new DeleteBlockResult(blockKey, resultCode));
+      }
+      results.add(new DeleteBlockGroupResult(keyBlocks.getGroupID(),
+          blockResultList));
+    }
+    return results;
+  }
+
+  @Override
+  public ScmInfo getScmInfo() throws IOException {
+    ScmInfo.Builder builder =
+        new ScmInfo.Builder()
+            .setClusterId(scm.getScmStorage().getClusterID())
+            .setScmId(scm.getScmStorage().getScmId());
+    return builder.build();
+  }
+}
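
A minimal usage sketch for the new server (hypothetical wiring; in the actual
code the StorageContainerManager owns the lifecycle, and the `scm` instance
below is assumed to already exist):

    OzoneConfiguration conf = new OzoneConfiguration();
    SCMBlockProtocolServer blockServer = new SCMBlockProtocolServer(conf, scm);
    blockServer.start();  // logs the bind address and starts the block RPC server
    // ... serve ScmBlockLocationProtocol calls from OzoneManager/Namenode clients ...
    blockServer.stop();   // stops the RPC server and closes the SCM node manager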

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
new file mode 100644
index 0000000..42cce2f
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -0,0 +1,314 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
+ * information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache
+ * License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a
+ * copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.server;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.BlockingService;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ozone.protocolPB
+    .StorageContainerLocationProtocolServerSideTranslatorPB;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.EnumSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import static org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos
+    .StorageContainerLocationProtocolService.newReflectiveBlockingService;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_CLIENT_ADDRESS_KEY;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_HANDLER_COUNT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_HANDLER_COUNT_KEY;
+import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
+import static org.apache.hadoop.hdds.scm.server.StorageContainerManager
+    .startRpcServer;
+
+/**
+ * The RPC server that listens to requests from clients.
+ */
+public class SCMClientProtocolServer implements
+    StorageContainerLocationProtocol {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMClientProtocolServer.class);
+  private final RPC.Server clientRpcServer;
+  private final InetSocketAddress clientRpcAddress;
+  private final StorageContainerManager scm;
+  private final OzoneConfiguration conf;
+
+  public SCMClientProtocolServer(OzoneConfiguration conf,
+      StorageContainerManager scm) throws IOException {
+    this.scm = scm;
+    this.conf = conf;
+    final int handlerCount =
+        conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY,
+            OZONE_SCM_HANDLER_COUNT_DEFAULT);
+    RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
+        ProtobufRpcEngine.class);
+
+    // SCM Container Service RPC
+    BlockingService storageProtoPbService =
+        newReflectiveBlockingService(
+            new StorageContainerLocationProtocolServerSideTranslatorPB(this));
+
+    final InetSocketAddress scmAddress = HddsServerUtil
+        .getScmClientBindAddress(conf);
+    clientRpcServer =
+        startRpcServer(
+            conf,
+            scmAddress,
+            StorageContainerLocationProtocolPB.class,
+            storageProtoPbService,
+            handlerCount);
+    clientRpcAddress =
+        updateRPCListenAddress(conf, OZONE_SCM_CLIENT_ADDRESS_KEY,
+            scmAddress, clientRpcServer);
+
+  }
+
+  public RPC.Server getClientRpcServer() {
+    return clientRpcServer;
+  }
+
+  public InetSocketAddress getClientRpcAddress() {
+    return clientRpcAddress;
+  }
+
+  public void start() {
+    LOG.info(
+        StorageContainerManager.buildRpcServerStartMessage(
+            "RPC server for Client ", getClientRpcAddress()));
+    getClientRpcServer().start();
+  }
+
+  public void stop() {
+    try {
+      LOG.info("Stopping the RPC server for Client Protocol");
+      getClientRpcServer().stop();
+    } catch (Exception ex) {
+      LOG.error("Client Protocol RPC stop failed.", ex);
+    }
+    IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager());
+  }
+
+  public void join() throws InterruptedException {
+    LOG.trace("Join RPC server for Client Protocol");
+    getClientRpcServer().join();
+  }
+
+  @Override
+  public Pipeline allocateContainer(HddsProtos.ReplicationType
+      replicationType, HddsProtos.ReplicationFactor factor, String
+      containerName, String owner) throws IOException {
+    scm.checkAdminAccess();
+    return scm.getScmContainerManager()
+        .allocateContainer(replicationType, factor, containerName, owner)
+        .getPipeline();
+  }
+
+  @Override
+  public Pipeline getContainer(String containerName) throws IOException {
+    return scm.getScmContainerManager()
+        .getContainer(containerName).getPipeline();
+  }
+
+  @Override
+  public List<ContainerInfo> listContainer(String startName,
+      String prefixName, int count) throws IOException {
+    return scm.getScmContainerManager()
+        .listContainer(startName, prefixName, count);
+  }
+
+  @Override
+  public void deleteContainer(String containerName) throws IOException {
+    scm.checkAdminAccess();
+    scm.getScmContainerManager().deleteContainer(containerName);
+
+  }
+
+  @Override
+  public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState>
+      nodeStatuses, HddsProtos.QueryScope queryScope, String poolName) throws
+      IOException {
+
+    if (queryScope == HddsProtos.QueryScope.POOL) {
+      throw new IllegalArgumentException("Not Supported yet");
+    }
+
+    List<DatanodeDetails> datanodes = queryNode(nodeStatuses);
+    HddsProtos.NodePool.Builder poolBuilder = HddsProtos.NodePool.newBuilder();
+
+    for (DatanodeDetails datanode : datanodes) {
+      HddsProtos.Node node =
+          HddsProtos.Node.newBuilder()
+              .setNodeID(datanode.getProtoBufMessage())
+              .addAllNodeStates(nodeStatuses)
+              .build();
+      poolBuilder.addNodes(node);
+    }
+
+    return poolBuilder.build();
+
+  }
+
+  @Override
+  public void notifyObjectStageChange(StorageContainerLocationProtocolProtos
+      .ObjectStageChangeRequestProto.Type type, String name,
+      StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto.Op
+          op, StorageContainerLocationProtocolProtos
+      .ObjectStageChangeRequestProto.Stage stage) throws IOException {
+
+    LOG.info("Object type {} name {} op {} new stage {}", type, name, op,
+        stage);
+    if (type == StorageContainerLocationProtocolProtos
+        .ObjectStageChangeRequestProto.Type.container) {
+      if (op == StorageContainerLocationProtocolProtos
+          .ObjectStageChangeRequestProto.Op.create) {
+        if (stage == StorageContainerLocationProtocolProtos
+            .ObjectStageChangeRequestProto.Stage.begin) {
+          scm.getScmContainerManager().updateContainerState(name, HddsProtos
+              .LifeCycleEvent.CREATE);
+        } else {
+          scm.getScmContainerManager().updateContainerState(name, HddsProtos
+              .LifeCycleEvent.CREATED);
+        }
+      } else {
+        if (op == StorageContainerLocationProtocolProtos
+            .ObjectStageChangeRequestProto.Op.close) {
+          if (stage == StorageContainerLocationProtocolProtos
+              .ObjectStageChangeRequestProto.Stage.begin) {
+            scm.getScmContainerManager().updateContainerState(name, HddsProtos
+                .LifeCycleEvent.FINALIZE);
+          } else {
+            scm.getScmContainerManager().updateContainerState(name, HddsProtos
+                .LifeCycleEvent.CLOSE);
+          }
+        }
+      }
+    } // else if (type == ObjectStageChangeRequestProto.Type.pipeline) {
+    // TODO: pipeline state update will be addressed in future patch.
+    // }
+
+  }
+
+  @Override
+  public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
+      HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
+      throws IOException {
+    // TODO: will be addressed in future patch.
+    // This is needed only for debugging purposes to make sure cluster is
+    // working correctly. 
+    return null;
+  }
+
+  @Override
+  public ScmInfo getScmInfo() throws IOException {
+    ScmInfo.Builder builder =
+        new ScmInfo.Builder()
+            .setClusterId(scm.getScmStorage().getClusterID())
+            .setScmId(scm.getScmStorage().getScmId());
+    return builder.build();
+  }
+
+  /**
+   * Queries a list of Node that match a set of statuses.
+   *
+   * <p>For example, if the nodeStatuses is HEALTHY and RAFT_MEMBER, then
+   * this call will return all
+   * healthy nodes which are members of a Raft pipeline.
+   *
+   * <p>Right now we don't support complex operators, so we assume an AND
+   * operation between the
+   * given states.
+   *
+   * @param nodeStatuses - A set of NodeStates.
+   * @return List of Datanodes.
+   */
+  public List<DatanodeDetails> queryNode(EnumSet<HddsProtos.NodeState>
+      nodeStatuses) {
+    Preconditions.checkNotNull(nodeStatuses, "Node Query set cannot be null");
+    Preconditions.checkState(nodeStatuses.size() > 0, "No valid arguments " +
+        "in the query set");
+    List<DatanodeDetails> resultList = new LinkedList<>();
+    Set<DatanodeDetails> currentSet = new TreeSet<>();
+
+    for (HddsProtos.NodeState nodeState : nodeStatuses) {
+      Set<DatanodeDetails> nextSet = queryNodeState(nodeState);
+      if ((nextSet == null) || (nextSet.size() == 0)) {
+        // Right now we only support the AND operation, so intersecting
+        // with an empty set yields an empty result.
+        return resultList;
+      }
+      // First time we have to add all the elements, next time we have to
+      // do an intersection operation on the set.
+      if (currentSet.size() == 0) {
+        currentSet.addAll(nextSet);
+      } else {
+        currentSet.retainAll(nextSet);
+      }
+    }
+
+    resultList.addAll(currentSet);
+    return resultList;
+  }
+
+  /**
+   * Query the System for Nodes.
+   *
+   * @param nodeState - NodeState that we are interested in matching.
+   * @return Set of Datanodes that match the NodeState.
+   */
+  private Set<DatanodeDetails> queryNodeState(HddsProtos.NodeState nodeState) {
+    if (nodeState == HddsProtos.NodeState.RAFT_MEMBER || nodeState ==
+        HddsProtos.NodeState
+        .FREE_NODE) {
+      throw new IllegalStateException("Not implemented yet");
+    }
+    Set<DatanodeDetails> returnSet = new TreeSet<>();
+    List<DatanodeDetails> tmp = scm.getScmNodeManager().getNodes(nodeState);
+    if ((tmp != null) && (tmp.size() > 0)) {
+      returnSet.addAll(tmp);
+    }
+    return returnSet;
+  }
+}
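
As an illustration of the queryNode helper above (a sketch only; `clientServer`
is assumed to be an SCMClientProtocolServer created against a running SCM):

    // AND semantics: the result is the intersection of the per-state node sets.
    List<DatanodeDetails> healthyNodes =
        clientServer.queryNode(EnumSet.of(HddsProtos.NodeState.HEALTHY));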




[36/50] [abbrv] hadoop git commit: YARN-8194. Fixed reinitialization error for LinuxContainerExecutor. Contributed by Chandni Singh

Posted by xk...@apache.org.
YARN-8194.  Fixed reinitialization error for LinuxContainerExecutor.
            Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4d280f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4d280f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4d280f0

Branch: refs/heads/HDFS-12943
Commit: f4d280f02b557885cd5e5cf36abc36eb579ccfb4
Parents: 6b63a0a
Author: Eric Yang <ey...@apache.org>
Authored: Wed May 2 20:07:19 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Wed May 2 20:07:19 2018 -0400

----------------------------------------------------------------------
 .../launcher/ContainerLaunch.java               | 37 ++++++++++++++++++++
 .../launcher/ContainerRelaunch.java             | 36 ++-----------------
 2 files changed, 39 insertions(+), 34 deletions(-)
----------------------------------------------------------------------
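
The net effect is that the relaunch path reuses helpers promoted into the
parent class; a simplified sketch of the call flow shown in the diffs below:

    // In ContainerRelaunch.call(), simplified:
    Path containerWorkDir = getContainerWorkDir();  // now inherited from ContainerLaunch
    cleanupContainerFiles(containerWorkDir);        // deletes the launch script and tokens file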


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d280f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 9efe686..fa77899 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
 
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+
+import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -844,6 +846,7 @@ public class ContainerLaunch implements Callable<Integer> {
       throw new IOException("Reap container failed for container "
           + containerIdStr);
     }
+    cleanupContainerFiles(getContainerWorkDir());
   }
 
   /**
@@ -1858,4 +1861,38 @@ public class ContainerLaunch implements Callable<Integer> {
       context.getNMStateStore().storeContainerWorkDir(containerId, workDir);
     }
   }
+
+  protected Path getContainerWorkDir() throws IOException {
+    String containerWorkDir = container.getWorkDir();
+    if (containerWorkDir == null
+        || !dirsHandler.isGoodLocalDir(containerWorkDir)) {
+      throw new IOException(
+          "Could not find a good work dir " + containerWorkDir
+              + " for container " + container);
+    }
+
+    return new Path(containerWorkDir);
+  }
+
+  /**
+   * Clean up container's files for container relaunch or cleanup.
+   */
+  protected void cleanupContainerFiles(Path containerWorkDir) {
+    LOG.debug("cleanup container {} files", containerWorkDir);
+    // delete ContainerScriptPath
+    deleteAsUser(new Path(containerWorkDir, CONTAINER_SCRIPT));
+    // delete TokensPath
+    deleteAsUser(new Path(containerWorkDir, FINAL_CONTAINER_TOKENS_FILE));
+  }
+
+  private void deleteAsUser(Path path) {
+    try {
+      exec.deleteAsUser(new DeletionAsUserContext.Builder()
+          .setUser(container.getUser())
+          .setSubDir(path)
+          .build());
+    } catch (Exception e) {
+      LOG.warn("Failed to delete " + path, e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d280f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
index c6e3ed4..f69cf96 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
-import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -71,7 +70,8 @@ public class ContainerRelaunch extends ContainerLaunch {
     Path containerLogDir;
     try {
       Path containerWorkDir = getContainerWorkDir();
-      cleanupPreviousContainerFiles(containerWorkDir);
+      // Clean up container's previous files for container relaunch.
+      cleanupContainerFiles(containerWorkDir);
 
       containerLogDir = getContainerLogDir();
 
@@ -148,17 +148,6 @@ public class ContainerRelaunch extends ContainerLaunch {
     return ret;
   }
 
-  private Path getContainerWorkDir() throws IOException {
-    String containerWorkDir = container.getWorkDir();
-    if (containerWorkDir == null
-        || !dirsHandler.isGoodLocalDir(containerWorkDir)) {
-      throw new IOException(
-          "Could not find a good work dir " + containerWorkDir
-          + " for container " + container);
-    }
-
-    return new Path(containerWorkDir);
-  }
 
   private Path getContainerLogDir() throws IOException {
     String containerLogDir = container.getLogDir();
@@ -190,25 +179,4 @@ public class ContainerRelaunch extends ContainerLaunch {
     return dirsHandler.getLocalPathForRead(
         getPidFileSubpath(appIdStr, containerIdStr));
   }
-
-  /**
-   * Clean up container's previous files for container relaunch.
-   */
-  private void cleanupPreviousContainerFiles(Path containerWorkDir) {
-    // delete ContainerScriptPath
-    deleteAsUser(new Path(containerWorkDir, CONTAINER_SCRIPT));
-    // delete TokensPath
-    deleteAsUser(new Path(containerWorkDir, FINAL_CONTAINER_TOKENS_FILE));
-  }
-
-  private void deleteAsUser(Path path) {
-    try {
-      exec.deleteAsUser(new DeletionAsUserContext.Builder()
-          .setUser(container.getUser())
-          .setSubDir(path)
-          .build());
-    } catch (Exception e) {
-      LOG.warn("Failed to delete " + path, e);
-    }
-  }
 }




[50/50] [abbrv] hadoop git commit: HDFS-13286. [SBN read] Add haadmin commands to transition between standby and observer. Contributed by Chao Sun.

Posted by xk...@apache.org.
HDFS-13286. [SBN read] Add haadmin commands to transition between standby and observer. Contributed by Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7f27391
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7f27391
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7f27391

Branch: refs/heads/HDFS-12943
Commit: f7f27391e19a88fd180ddd744686184cd0158690
Parents: a38fde5
Author: Erik Krogen <xk...@apache.org>
Authored: Fri May 4 12:22:12 2018 -0700
Committer: Erik Krogen <xk...@apache.org>
Committed: Fri May 4 12:27:03 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/ha/FailoverController.java    |  2 +-
 .../main/java/org/apache/hadoop/ha/HAAdmin.java | 42 +++++++++++++++++++
 .../org/apache/hadoop/ha/HAServiceProtocol.java | 18 ++++++++
 .../hadoop/ha/HAServiceProtocolHelper.java      |  9 ++++
 .../org/apache/hadoop/ha/HAServiceTarget.java   |  7 ++++
 ...HAServiceProtocolClientSideTranslatorPB.java | 16 +++++++
 ...HAServiceProtocolServerSideTranslatorPB.java | 20 +++++++++
 .../src/main/proto/HAServiceProtocol.proto      | 20 +++++++++
 .../org/apache/hadoop/ha/DummyHAService.java    | 18 +++++++-
 .../org/apache/hadoop/ha/MiniZKFCCluster.java   |  4 ++
 .../FederationNamenodeServiceState.java         |  3 ++
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  4 ++
 .../hdfs/server/datanode/BPServiceActor.java    |  2 +-
 .../hdfs/server/namenode/FSNamesystem.java      |  3 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   | 27 ++++++++----
 .../hdfs/server/namenode/NameNodeRpcServer.java |  8 ++++
 .../hdfs/server/namenode/ha/StandbyState.java   | 12 +++---
 .../hadoop/hdfs/tools/NNHAServiceTarget.java    |  5 +++
 .../hadoop-hdfs/src/main/proto/HdfsServer.proto |  1 +
 .../hadoop/hdfs/tools/TestDFSHAAdmin.java       |  6 +++
 .../hdfs/tools/TestDFSHAAdminMiniCluster.java   | 44 ++++++++++++++++++++
 .../server/resourcemanager/AdminService.java    |  7 ++++
 22 files changed, 258 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
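
With this change the new state can be reached from the command line, e.g.
(sketch; `nn2` is a hypothetical NameNode ID and must currently be in standby):

    $ hdfs haadmin -transitionToObserver nn2
    $ hdfs haadmin -transitionToStandby nn2     # transition back to standby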


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
index b86ae29..4fc52d5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
@@ -129,7 +129,7 @@ public class FailoverController {
 
     if (!toSvcStatus.getState().equals(HAServiceState.STANDBY)) {
       throw new FailoverFailedException(
-          "Can't failover to an active service");
+          "Can't failover to an " + toSvcStatus.getState() + " service");
     }
     
     if (!toSvcStatus.isReadyToBecomeActive()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index 9b7d7ba..61700f9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -72,6 +72,9 @@ public abstract class HAAdmin extends Configured implements Tool {
         new UsageInfo("[--"+FORCEACTIVE+"] <serviceId>", "Transitions the service into Active state"))
     .put("-transitionToStandby",
         new UsageInfo("<serviceId>", "Transitions the service into Standby state"))
+      .put("-transitionToObserver",
+          new UsageInfo("<serviceId>",
+              "Transitions the service into Observer state"))
     .put("-failover",
         new UsageInfo("[--"+FORCEFENCE+"] [--"+FORCEACTIVE+"] <serviceId> <serviceId>",
             "Failover from the first service to the second.\n" +
@@ -221,6 +224,28 @@ public abstract class HAAdmin extends Configured implements Tool {
     HAServiceProtocolHelper.transitionToStandby(proto, createReqInfo());
     return 0;
   }
+
+  private int transitionToObserver(final CommandLine cmd)
+      throws IOException, ServiceFailedException {
+    String[] argv = cmd.getArgs();
+    if (argv.length != 1) {
+      errOut.println("transitionToObserver: incorrect number of arguments");
+      printUsage(errOut, "-transitionToObserver");
+      return -1;
+    }
+
+    HAServiceTarget target = resolveTarget(argv[0]);
+    if (!checkSupportObserver(target)) {
+      return -1;
+    }
+    if (!checkManualStateManagementOK(target)) {
+      return -1;
+    }
+    HAServiceProtocol proto = target.getProxy(getConf(), 0);
+    HAServiceProtocolHelper.transitionToObserver(proto, createReqInfo());
+    return 0;
+  }
+
   /**
    * Ensure that we are allowed to manually manage the HA state of the target
    * service. If automatic failover is configured, then the automatic
@@ -249,6 +274,21 @@ public abstract class HAAdmin extends Configured implements Tool {
     return true;
   }
 
+  /**
+   * Check if the target supports the Observer state.
+   * @param target the target to check
+   * @return true if the target supports the Observer state, false otherwise.
+   */
+  private boolean checkSupportObserver(HAServiceTarget target) {
+    if (target.supportObserver()) {
+      return true;
+    } else {
+      errOut.println(
+          "The target " + target + " doesn't support Observer state.");
+      return false;
+    }
+  }
+
   private StateChangeRequestInfo createReqInfo() {
     return new StateChangeRequestInfo(requestSource);
   }
@@ -461,6 +501,8 @@ public abstract class HAAdmin extends Configured implements Tool {
       return transitionToActive(cmdLine);
     } else if ("-transitionToStandby".equals(cmd)) {
       return transitionToStandby(cmdLine);
+    } else if ("-transitionToObserver".equals(cmd)) {
+      return transitionToObserver(cmdLine);
     } else if ("-failover".equals(cmd)) {
       return failover(cmdLine);
     } else if ("-getServiceState".equals(cmd)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
index 7099de8..74a3d12 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
@@ -51,6 +51,7 @@ public interface HAServiceProtocol {
     INITIALIZING("initializing"),
     ACTIVE("active"),
     STANDBY("standby"),
+    OBSERVER("observer"),
     STOPPING("stopping");
 
     private String name;
@@ -149,6 +150,23 @@ public interface HAServiceProtocol {
                                            IOException;
 
   /**
+   * Request service to transition to observer state. No operation, if the
+   * service is already in observer state.
+   *
+   * @throws ServiceFailedException
+   *           if transition from standby to observer fails.
+   * @throws AccessControlException
+   *           if access is denied.
+   * @throws IOException
+   *           if other errors happen
+   */
+  @Idempotent
+  void transitionToObserver(StateChangeRequestInfo reqInfo)
+                              throws ServiceFailedException,
+                                     AccessControlException,
+                                     IOException;
+
+  /**
    * Return the current status of the service. The status indicates
    * the current <em>state</em> (e.g ACTIVE/STANDBY) as well as
    * some additional information.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java
index 58d4a7f..a2441fb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java
@@ -60,4 +60,13 @@ public class HAServiceProtocolHelper {
       throw e.unwrapRemoteException(ServiceFailedException.class);
     }
   }
+
+  public static void transitionToObserver(HAServiceProtocol svc,
+      StateChangeRequestInfo reqInfo) throws IOException {
+    try {
+      svc.transitionToObserver(reqInfo);
+    } catch (RemoteException e) {
+      throw e.unwrapRemoteException(ServiceFailedException.class);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java
index 98aab99..4a2a21b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java
@@ -170,4 +170,11 @@ public abstract class HAServiceTarget {
   public boolean isAutoFailoverEnabled() {
     return false;
   }
+
+  /**
+   * @return true if this target supports the Observer state, false otherwise.
+   */
+  public boolean supportObserver() {
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
index 589ccd1..fec519f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto;
+import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolTranslator;
@@ -116,6 +117,19 @@ public class HAServiceProtocolClientSideTranslatorPB implements
   }
 
   @Override
+  public void transitionToObserver(StateChangeRequestInfo reqInfo)
+      throws IOException {
+    try {
+      TransitionToObserverRequestProto req =
+          TransitionToObserverRequestProto.newBuilder()
+              .setReqInfo(convert(reqInfo)).build();
+      rpcProxy.transitionToObserver(NULL_CONTROLLER, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
   public HAServiceStatus getServiceStatus() throws IOException {
     GetServiceStatusResponseProto status;
     try {
@@ -141,6 +155,8 @@ public class HAServiceProtocolClientSideTranslatorPB implements
       return HAServiceState.ACTIVE;
     case STANDBY:
       return HAServiceState.STANDBY;
+    case OBSERVER:
+      return HAServiceState.OBSERVER;
     case INITIALIZING:
     default:
       return HAServiceState.INITIALIZING;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
index 7f75582..72787cf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
@@ -35,6 +35,8 @@ import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequ
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto;
+import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto;
+import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 
@@ -61,6 +63,9 @@ public class HAServiceProtocolServerSideTranslatorPB implements
       TransitionToActiveResponseProto.newBuilder().build();
   private static final TransitionToStandbyResponseProto TRANSITION_TO_STANDBY_RESP = 
       TransitionToStandbyResponseProto.newBuilder().build();
+  private static final TransitionToObserverResponseProto
+      TRANSITION_TO_OBSERVER_RESP =
+      TransitionToObserverResponseProto.newBuilder().build();
   private static final Logger LOG = LoggerFactory.getLogger(
       HAServiceProtocolServerSideTranslatorPB.class);
   
@@ -124,6 +129,18 @@ public class HAServiceProtocolServerSideTranslatorPB implements
   }
 
   @Override
+  public TransitionToObserverResponseProto transitionToObserver(
+      RpcController controller, TransitionToObserverRequestProto request)
+      throws ServiceException {
+    try {
+      server.transitionToObserver(convert(request.getReqInfo()));
+      return TRANSITION_TO_OBSERVER_RESP;
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public GetServiceStatusResponseProto getServiceStatus(RpcController controller,
       GetServiceStatusRequestProto request) throws ServiceException {
     HAServiceStatus s;
@@ -141,6 +158,9 @@ public class HAServiceProtocolServerSideTranslatorPB implements
     case STANDBY:
       retState = HAServiceStateProto.STANDBY;
       break;
+    case OBSERVER:
+      retState = HAServiceStateProto.OBSERVER;
+      break;
     case INITIALIZING:
     default:
       retState = HAServiceStateProto.INITIALIZING;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto b/hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto
index e0060f2..16ee9a2 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto
@@ -32,6 +32,7 @@ enum HAServiceStateProto {
   INITIALIZING = 0;
   ACTIVE = 1;
   STANDBY = 2;
+  OBSERVER = 3;
 }
 
 enum HARequestSource {
@@ -85,6 +86,19 @@ message TransitionToStandbyResponseProto {
 /**
  * void request
  */
+message TransitionToObserverRequestProto {
+  required HAStateChangeRequestInfoProto reqInfo = 1;
+}
+
+/**
+ * void response
+ */
+message TransitionToObserverResponseProto {
+}
+
+/**
+ * void request
+ */
 message GetServiceStatusRequestProto { 
 }
 
@@ -127,6 +141,12 @@ service HAServiceProtocolService {
       returns(TransitionToStandbyResponseProto);
 
   /**
+   * Request service to transition to observer state.
+   */
+  rpc transitionToObserver(TransitionToObserverRequestProto)
+      returns(TransitionToObserverResponseProto);
+
+  /**
    * Get the current status of the service.
    */
   rpc getServiceStatus(GetServiceStatusRequestProto)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
index 6f01be8..51112be 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
@@ -56,7 +56,8 @@ class DummyHAService extends HAServiceTarget {
   InetSocketAddress address, healthMonitorAddress;
   boolean isHealthy = true;
   boolean actUnreachable = false;
-  boolean failToBecomeActive, failToBecomeStandby, failToFence;
+  boolean failToBecomeActive, failToBecomeStandby, failToBecomeObserver,
+      failToFence;
   
   DummySharedResource sharedResource;
   public int fenceCount = 0;
@@ -217,6 +218,11 @@ class DummyHAService extends HAServiceTarget {
   }
 
   @Override
+  public boolean supportObserver() {
+    return true;
+  }
+
+  @Override
   public String toString() {
     return "DummyHAService #" + index;
   }
@@ -264,6 +270,16 @@ class DummyHAService extends HAServiceTarget {
     }
     
     @Override
+    public void transitionToObserver(StateChangeRequestInfo req)
+        throws ServiceFailedException, AccessControlException, IOException {
+      checkUnreachable();
+      if (failToBecomeObserver) {
+        throw new ServiceFailedException("injected failure");
+      }
+      state = HAServiceState.OBSERVER;
+    }
+
+    @Override
     public HAServiceStatus getServiceStatus() throws IOException {
       checkUnreachable();
       HAServiceStatus ret = new HAServiceStatus(state);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
index 9146e01..f63d267 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
@@ -187,6 +187,10 @@ public class MiniZKFCCluster {
     svcs.get(idx).actUnreachable = unreachable;
   }
 
+  public void setFailToBecomeObserver(int idx, boolean doFail) {
+    svcs.get(idx).failToBecomeObserver = doFail;
+  }
+
   /**
    * Wait for the given HA service to enter the given HA state.
    * This is based on the state of ZKFC, not the state of HA service.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java
index 7907e30..ed8f8c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java
@@ -35,6 +35,9 @@ public enum FederationNamenodeServiceState {
     case ACTIVE:
       return FederationNamenodeServiceState.ACTIVE;
     case STANDBY:
+    // TODO: we should probably have a separate state OBSERVER for RBF and
+    // treat it differently.
+    case OBSERVER:
       return FederationNamenodeServiceState.STANDBY;
     case INITIALIZING:
       return FederationNamenodeServiceState.UNAVAILABLE;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index ac01348..baec6fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -779,6 +779,8 @@ public class PBHelper {
       return HAServiceState.ACTIVE;
     case STANDBY:
       return HAServiceState.STANDBY;
+    case OBSERVER:
+      return HAServiceState.OBSERVER;
     default:
       throw new IllegalArgumentException("Unexpected HAServiceStateProto:"
           + s);
@@ -794,6 +796,8 @@ public class PBHelper {
       return NNHAStatusHeartbeatProto.State.ACTIVE;
     case STANDBY:
       return NNHAStatusHeartbeatProto.State.STANDBY;
+    case OBSERVER:
+      return NNHAStatusHeartbeatProto.State.OBSERVER;
     default:
       throw new IllegalArgumentException("Unexpected HAServiceState:"
           + s);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index a94d2df..b7d67c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -904,7 +904,7 @@ class BPServiceActor implements Runnable {
       scheduler.scheduleHeartbeat();
       // HDFS-9917,Standby NN IBR can be very huge if standby namenode is down
       // for sometime.
-      if (state == HAServiceState.STANDBY) {
+      if (state == HAServiceState.STANDBY || state == HAServiceState.OBSERVER) {
         ibrManager.clearIBRs();
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ddb7797..d9c8211 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1710,7 +1710,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       return haEnabled;
     }
 
-    return HAServiceState.STANDBY == haContext.getState().getServiceState();
+    return HAServiceState.STANDBY == haContext.getState().getServiceState() ||
+        HAServiceState.OBSERVER == haContext.getState().getServiceState();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 6586237..e002323 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -1749,25 +1749,35 @@ public class NameNode extends ReconfigurableBase implements
       throw new ServiceFailedException("HA for namenode is not enabled");
     }
     if (state == OBSERVER_STATE) {
-      // TODO: we may need to remove this when enabling failover for observer
       throw new ServiceFailedException(
-          "Cannot transition from Observer to Active");
+          "Cannot transition from '" + OBSERVER_STATE + "' to '" +
+              ACTIVE_STATE + "'");
     }
     state.setState(haContext, ACTIVE_STATE);
   }
-  
-  synchronized void transitionToStandby() 
+
+  synchronized void transitionToStandby()
       throws ServiceFailedException, AccessControlException {
     namesystem.checkSuperuserPrivilege();
     if (!haEnabled) {
       throw new ServiceFailedException("HA for namenode is not enabled");
     }
-    if (state == OBSERVER_STATE) {
-      // TODO: we may need to remove this when enabling failover for observer
+    state.setState(haContext, STANDBY_STATE);
+  }
+
+  synchronized void transitionToObserver()
+      throws ServiceFailedException, AccessControlException {
+    namesystem.checkSuperuserPrivilege();
+    if (!haEnabled) {
+      throw new ServiceFailedException("HA for namenode is not enabled");
+    }
+    // Transition from ACTIVE to OBSERVER is forbidden.
+    if (state == ACTIVE_STATE) {
       throw new ServiceFailedException(
-          "Cannot transition from Observer to Standby");
+          "Cannot transition from '" + ACTIVE_STATE + "' to '" +
+              OBSERVER_STATE + "'");
     }
-    state.setState(haContext, STANDBY_STATE);
+    state.setState(haContext, OBSERVER_STATE);
   }
 
   synchronized HAServiceStatus getServiceStatus()
@@ -1822,7 +1832,6 @@ public class NameNode extends ReconfigurableBase implements
 
   @Override // NameNodeStatusMXBean
   public String getState() {
-    // TODO: maybe we should return a different result for observer namenode?
     String servStateStr = "";
     HAServiceState servState = getServiceState();
     if (null != servState) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 8b20d57..5d65e69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1727,6 +1727,14 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override // HAServiceProtocol
+  public synchronized void transitionToObserver(StateChangeRequestInfo req)
+      throws ServiceFailedException, AccessControlException, IOException {
+    checkNNStartup();
+    nn.checkHaStateChange(req);
+    nn.transitionToObserver();
+  }
+
+  @Override // HAServiceProtocol
   public synchronized HAServiceStatus getServiceStatus() 
       throws AccessControlException, ServiceFailedException, IOException {
     checkNNStartup();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
index 9a21888..ac3e7f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.ipc.StandbyException;
  */
 @InterfaceAudience.Private
 public class StandbyState extends HAState {
+  // TODO: consider implementing an ObserverState instead of using the flag.
   private final boolean isObserver;
 
   public StandbyState() {
@@ -46,21 +47,18 @@ public class StandbyState extends HAState {
   }
 
   public StandbyState(boolean isObserver) {
-    super(HAServiceState.STANDBY);
+    super(isObserver ? HAServiceState.OBSERVER : HAServiceState.STANDBY);
     this.isObserver = isObserver;
   }
 
   @Override
   public void setState(HAContext context, HAState s) throws ServiceFailedException {
-    if (s == NameNode.ACTIVE_STATE) {
+    if (s == NameNode.ACTIVE_STATE ||
+        (!isObserver && s == NameNode.OBSERVER_STATE) ||
+        (isObserver && s == NameNode.STANDBY_STATE)) {
       setStateInternal(context, s);
       return;
     }
-    if (isObserver && s == NameNode.STANDBY_STATE) {
-      // To guard against the exception in the following super call.
-      // The other case, standby -> observer, should not happen.
-      return;
-    }
     super.setState(context, s);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
index a598c3d..c4527e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
@@ -186,4 +186,9 @@ public class NNHAServiceTarget extends HAServiceTarget {
   public boolean isAutoFailoverEnabled() {
     return autoFailoverEnabled;
   }
+
+  @Override
+  public boolean supportObserver() {
+    return true;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
index e50883a..85cfb6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
@@ -212,6 +212,7 @@ message NNHAStatusHeartbeatProto {
   enum State {
     ACTIVE = 0;
     STANDBY = 1;
+    OBSERVER = 2;
   }
   required State state = 1;
   required uint64 txid = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
index f1f5793..d9dba57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
@@ -275,6 +275,12 @@ public class TestDFSHAAdmin {
   }
 
   @Test
+  public void testTransitionToObserver() throws Exception {
+    assertEquals(0, runTool("-transitionToObserver", "nn1"));
+    Mockito.verify(mockProtocol).transitionToObserver(anyReqInfo());
+  }
+
+  @Test
   public void testFailoverWithNoFencerConfigured() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     assertEquals(-1, runTool("-failover", "nn1", "nn2"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
index a21a31d..c2d6fb9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
@@ -115,6 +115,50 @@ public class TestDFSHAAdminMiniCluster {
     assertFalse(nnode2.isStandbyState());
     assertEquals(0, runTool("-transitionToStandby", "nn2"));
     assertTrue(nnode2.isStandbyState());
+    assertEquals(0, runTool("-transitionToObserver", "nn2"));
+    assertFalse(nnode2.isStandbyState());
+    assertTrue(nnode2.isObserverState());
+  }
+
+  @Test
+  public void testObserverTransition() throws Exception {
+    NameNode nnode1 = cluster.getNameNode(0);
+    assertTrue(nnode1.isStandbyState());
+
+    // Should be able to transition from STANDBY to OBSERVER
+    assertEquals(0, runTool("-transitionToObserver", "nn1"));
+    assertFalse(nnode1.isStandbyState());
+    assertTrue(nnode1.isObserverState());
+
+    // Transition from Observer to Observer should be no-op
+    assertEquals(0, runTool("-transitionToObserver", "nn1"));
+    assertTrue(nnode1.isObserverState());
+
+    // Should also be able to transition back from OBSERVER to STANDBY
+    assertEquals(0, runTool("-transitionToStandby", "nn1"));
+    assertTrue(nnode1.isStandbyState());
+    assertFalse(nnode1.isObserverState());
+  }
+
+  @Test
+  public void testObserverIllegalTransition() throws Exception {
+    NameNode nnode1 = cluster.getNameNode(0);
+    assertTrue(nnode1.isStandbyState());
+    assertEquals(0, runTool("-transitionToActive", "nn1"));
+    assertFalse(nnode1.isStandbyState());
+    assertTrue(nnode1.isActiveState());
+
+    // Should NOT be able to transition from ACTIVE to OBSERVER
+    assertEquals(-1, runTool("-transitionToObserver", "nn1"));
+    assertTrue(nnode1.isActiveState());
+
+    // Should NOT be able to transition from OBSERVER to ACTIVE
+    assertEquals(0, runTool("-transitionToStandby", "nn1"));
+    assertTrue(nnode1.isStandbyState());
+    assertEquals(0, runTool("-transitionToObserver", "nn1"));
+    assertTrue(nnode1.isObserverState());
+    assertEquals(-1, runTool("-transitionToActive", "nn1"));
+    assertFalse(nnode1.isActiveState());
   }
   
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index 3c117bc..b9b57e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -355,6 +355,13 @@ public class AdminService extends CompositeService implements
     }
   }
 
+  @Override
+  public synchronized void transitionToObserver(
+      StateChangeRequestInfo reqInfo) throws IOException {
+    // Should NOT get here, as RMHAServiceTarget doesn't support observer.
+    throw new ServiceFailedException("Does not support transition to Observer");
+  }
+
   /**
    * Return the HA status of this RM. This includes the current state and
    * whether the RM is ready to become active.
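
In summary, after this change a NameNode may move between the Standby and
Observer states in either direction, while a transition between Active and
Observer (in either direction) is rejected, as the new TestDFSHAAdminMiniCluster
cases above exercise. A minimal sketch of that rule, assuming the OBSERVER value
added to HAServiceState elsewhere on the HDFS-12943 branch; the class and method
names below are illustrative only and not part of this patch:

```java
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;

/**
 * Illustrative summary of the HA transition rules enforced above. The real
 * checks live in NameNode#transitionToActive/transitionToObserver and
 * StandbyState#setState; same-state requests are effectively no-ops and are
 * not modelled here.
 */
public final class ObserverTransitionRules {
  private ObserverTransitionRules() {
  }

  public static boolean isForbidden(HAServiceState from, HAServiceState to) {
    // Observer -> Active and Active -> Observer stay forbidden, while
    // Standby -> Observer and Observer -> Standby are now permitted.
    return (from == HAServiceState.OBSERVER && to == HAServiceState.ACTIVE)
        || (from == HAServiceState.ACTIVE && to == HAServiceState.OBSERVER);
  }
}
```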




[46/50] [abbrv] hadoop git commit: YARN-8223. Improved yarn auxiliary service to load jar file from HDFS. Contributed by Zian Chen

Posted by xk...@apache.org.
YARN-8223.  Improved yarn auxiliary service to load jar file from HDFS.
            Contributed by Zian Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8cdb032a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8cdb032a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8cdb032a

Branch: refs/heads/HDFS-12943
Commit: 8cdb032aff4237d8d3970057d82290e4e32c4040
Parents: 6795f80
Author: Eric Yang <ey...@apache.org>
Authored: Fri May 4 12:36:31 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Fri May 4 12:36:31 2018 -0400

----------------------------------------------------------------------
 .../PluggableShuffleAndPluggableSort.md         | 44 ++++++++++++++++++++
 .../containermanager/AuxServices.java           | 19 ++++++++-
 2 files changed, 61 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cdb032a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
index 5ea0567..9e24103 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
@@ -67,6 +67,50 @@ The collector class configuration may specify a comma-separated list of collecto
 |:---- |:---- |:---- |
 | `yarn.nodemanager.aux-services` | `...,mapreduce_shuffle` | The auxiliary service name |
 | `yarn.nodemanager.aux-services.mapreduce_shuffle.class` | `org.apache.hadoop.mapred.ShuffleHandler` | The auxiliary service class to use |
+| `yarn.nodemanager.aux-services.%s.classpath` | NONE | Local directory that contains the service jar file as well as all of its dependencies’ jar files. Either a single jar file or a pattern such as `/dep/*` (to load all jars under the `dep` directory) can be specified. |
+| `yarn.nodemanager.aux-services.%s.remote-classpath` | NONE | The remote absolute or relative path to the jar file |
+
+#### Example of loading jar file from HDFS:
+
+```xml
+<configuration>
+    <property>
+        <name>yarn.nodemanager.aux-services</name>
+        <value>mapreduce_shuffle,AuxServiceFromHDFS</value>
+    </property>
+
+    <property>
+        <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.remote-classpath</name>
+        <value>/aux/test/aux-service-hdfs.jar</value>
+    </property>
+
+    <property>
+        <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.class</name>
+        <value>org.apache.auxtest.AuxServiceFromHDFS2</value>
+    </property>
+</configuration>
+```
+
+#### Example of loading jar file from local file system:
+
+```xml
+<configuration>
+    <property>
+        <name>yarn.nodemanager.aux-services</name>
+        <value>mapreduce_shuffle,AuxServiceFromHDFS</value>
+    </property>
+
+    <property>
+        <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.classpath</name>
+        <value>/aux/test/aux-service-hdfs.jar</value>
+    </property>
+
+    <property>
+        <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.class</name>
+        <value>org.apache.auxtest.AuxServiceFromHDFS2</value>
+    </property>
+</configuration>
+```
 
 **IMPORTANT:** If setting an auxiliary service in addition to the default
 `mapreduce_shuffle` service, then a new service key should be added to the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cdb032a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
index c8b7a76..3fe3cfd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -230,15 +230,30 @@ public class AuxServices extends AbstractService
               }
             }
             if (reDownload) {
+              LocalResourceType srcType = null;
+              String lowerDst = StringUtils.toLowerCase(src.toString());
+              if (lowerDst.endsWith(".jar")) {
+                srcType = LocalResourceType.FILE;
+              } else if (lowerDst.endsWith(".zip") ||
+                  lowerDst.endsWith(".tar.gz") || lowerDst.endsWith(".tgz")
+                  || lowerDst.endsWith(".tar")) {
+                srcType = LocalResourceType.ARCHIVE;
+              } else {
+                throw new YarnRuntimeException(
+                    "Cannot unpack file from remote-file-path: " + src
+                        + " for aux-service.\n");
+              }
               LocalResource scRsrc = LocalResource.newInstance(
                   URL.fromURI(src.toUri()),
-                  LocalResourceType.ARCHIVE, LocalResourceVisibility.PRIVATE,
+                  srcType, LocalResourceVisibility.PRIVATE,
                   scFileStatus.getLen(), scFileStatus.getModificationTime());
               FSDownload download = new FSDownload(localLFS, null, conf,
                   downloadDest, scRsrc, null);
               try {
                 Path downloaded = download.call();
-                dest = new Path(downloaded + Path.SEPARATOR + "*");
+                // don't need to convert downloaded path into a dir
+                // since it's already a jar path.
+                dest = downloaded;
               } catch (Exception ex) {
                 throw new YarnRuntimeException(
                     "Exception happend while downloading files "


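Worth noting from the AuxServices change above: the localization type is chosen
from the file extension of the `remote-classpath` entry, so plain jars are
localized as files, while `.zip`, `.tar.gz`, `.tgz` and `.tar` entries are treated
as archives to be unpacked, and anything else is rejected. An illustrative
restatement of that rule (the class and method names here are not part of the
patch):

```java
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.LocalResourceType;

/** Illustrative restatement of the extension rule used by AuxServices above. */
public final class AuxClasspathTypeRule {
  private AuxClasspathTypeRule() {
  }

  public static LocalResourceType typeFor(String path) {
    String lower = StringUtils.toLowerCase(path);
    if (lower.endsWith(".jar")) {
      // Plain jars are localized as-is.
      return LocalResourceType.FILE;
    }
    if (lower.endsWith(".zip") || lower.endsWith(".tar.gz")
        || lower.endsWith(".tgz") || lower.endsWith(".tar")) {
      // Recognised archive formats are unpacked on the NodeManager.
      return LocalResourceType.ARCHIVE;
    }
    throw new IllegalArgumentException(
        "Unsupported remote-classpath entry: " + path);
  }
}
```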


[31/50] [abbrv] hadoop git commit: HADOOP-12071. conftest is not documented. Contributed by Kengo Seki.

Posted by xk...@apache.org.
HADOOP-12071. conftest is not documented.
Contributed by Kengo Seki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe649bb3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe649bb3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe649bb3

Branch: refs/heads/HDFS-12943
Commit: fe649bb3051f5647073c840d7334a90265ea3f06
Parents: 251f528
Author: Steve Loughran <st...@apache.org>
Authored: Wed May 2 13:33:56 2018 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Wed May 2 13:33:56 2018 +0100

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/util/ConfTest.java | 10 ++++------
 .../src/site/markdown/CommandsManual.md            | 17 +++++++++++++++++
 2 files changed, 21 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe649bb3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
index 1915e79..a2cb85f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
@@ -84,7 +84,7 @@ public final class ConfTest {
     QName property = new QName("property");
 
     List<NodeInfo> nodes = new ArrayList<NodeInfo>();
-    Stack<NodeInfo> parsed = new Stack<NodeInfo>();
+    Stack<NodeInfo> parsed = new Stack<>();
 
     XMLInputFactory factory = XMLInputFactory.newInstance();
     XMLEventReader reader = factory.createXMLEventReader(in);
@@ -258,9 +258,7 @@ public final class ConfTest {
         if (confFile.isFile()) {
           files.add(confFile);
         } else if (confFile.isDirectory()) {
-          for (File file : listFiles(confFile)) {
-            files.add(file);
-          }
+          files.addAll(Arrays.asList(listFiles(confFile)));
         } else {
           terminate(1, confFile.getAbsolutePath()
               + " is neither a file nor directory");
@@ -313,9 +311,9 @@ class NodeInfo {
   private StartElement startElement;
   private List<Attribute> attributes = new ArrayList<Attribute>();
   private Map<StartElement, Characters> elements =
-      new HashMap<StartElement, Characters>();
+      new HashMap<>();
   private Map<QName, List<XMLEvent>> qNameXMLEventsMap =
-      new HashMap<QName, List<XMLEvent>>();
+      new HashMap<>();
 
   public NodeInfo(StartElement startElement) {
     this.startElement = startElement;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe649bb3/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 2839503..ce904c5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -99,6 +99,23 @@ Usage: `hadoop classpath [--glob |--jar <path> |-h |--help]`
 
 Prints the class path needed to get the Hadoop jar and the required libraries. If called without arguments, then prints the classpath set up by the command scripts, which is likely to contain wildcards in the classpath entries. Additional options print the classpath after wildcard expansion or write the classpath into the manifest of a jar file. The latter is useful in environments where wildcards cannot be used and the expanded classpath exceeds the maximum supported command line length.
 
+### `conftest`
+
+Usage: `hadoop conftest [-conffile <path>]...`
+
+| COMMAND\_OPTION | Description |
+|:---- |:---- |
+| `-conffile` | Path of a configuration file or directory to validate |
+| `-h`, `--help` | print help |
+
+Validates configuration XML files.
+If the `-conffile` option is not specified, the files in `${HADOOP_CONF_DIR}` whose names end with `.xml` will be verified. If it is specified, that path will be verified. You can specify either a file or a directory; if a directory is specified, the files in that directory whose names end with `.xml` will be verified.
+The `-conffile` option can be specified multiple times.
+
+The validation is fairly minimal: the XML is parsed, and duplicate and empty
+property names are checked for. The command does not support XInclude; if you
+use it to pull in configuration items, it will declare the XML file invalid.
+
 ### `credential`
 
 Usage: `hadoop credential <subcommand> [options]`




[13/50] [abbrv] hadoop git commit: HDFS-13283. Percentage based Reserved Space Calculation for DataNode. Contributed by Lukas Majercak.

Posted by xk...@apache.org.
HDFS-13283. Percentage based Reserved Space Calculation for DataNode. Contributed by Lukas Majercak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc074a35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc074a35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc074a35

Branch: refs/heads/HDFS-12943
Commit: fc074a359c44e84dd9612be2bd772763f943eb04
Parents: 9b09555
Author: Inigo Goiri <in...@apache.org>
Authored: Mon Apr 30 13:28:33 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Mon Apr 30 13:28:33 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  38 ++--
 .../fsdataset/impl/FsVolumeImplBuilder.java     |  16 +-
 .../fsdataset/impl/ProvidedVolumeImpl.java      |   2 +-
 .../fsdataset/impl/ReservedSpaceCalculator.java | 227 +++++++++++++++++++
 .../src/main/resources/hdfs-default.xml         |  28 +++
 .../fsdataset/impl/TestFsVolumeList.java        |  90 +++++++-
 .../impl/TestReservedSpaceCalculator.java       | 171 ++++++++++++++
 8 files changed, 561 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc074a35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index a7f0a07..bc8e81f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -647,8 +648,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_DNS_INTERFACE_DEFAULT = "default";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_KEY = "dfs.datanode.dns.nameserver";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_DEFAULT = "default";
+  public static final String DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY =
+      "dfs.datanode.du.reserved.calculator";
+  public static final Class<? extends ReservedSpaceCalculator>
+      DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT =
+          ReservedSpaceCalculator.ReservedSpaceCalculatorAbsolute.class;
   public static final String  DFS_DATANODE_DU_RESERVED_KEY = "dfs.datanode.du.reserved";
   public static final long    DFS_DATANODE_DU_RESERVED_DEFAULT = 0;
+  public static final String  DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY =
+      "dfs.datanode.du.reserved.pct";
+  public static final int     DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT = 0;
   public static final String  DFS_DATANODE_HANDLER_COUNT_KEY = "dfs.datanode.handler.count";
   public static final int     DFS_DATANODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = "dfs.datanode.http.address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc074a35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index b8c95a4..9969976 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -78,7 +78,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTrack
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.CloseableReferenceCount;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;
@@ -121,7 +120,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   private final File currentDir;    // <StorageDirectory>/current
   private final DF usage;
-  private final long reserved;
+  private final ReservedSpaceCalculator reserved;
   private CloseableReferenceCount reference = new CloseableReferenceCount();
 
   // Disk space reserved for blocks (RBW or Re-replicating) open for write.
@@ -142,10 +141,16 @@ public class FsVolumeImpl implements FsVolumeSpi {
    * contention.
    */
   protected ThreadPoolExecutor cacheExecutor;
-  
-  FsVolumeImpl(
-      FsDatasetImpl dataset, String storageID, StorageDirectory sd,
+
+  FsVolumeImpl(FsDatasetImpl dataset, String storageID, StorageDirectory sd,
       FileIoProvider fileIoProvider, Configuration conf) throws IOException {
+    // outside tests, usage created in ReservedSpaceCalculator.Builder
+    this(dataset, storageID, sd, fileIoProvider, conf, null);
+  }
+
+  FsVolumeImpl(FsDatasetImpl dataset, String storageID, StorageDirectory sd,
+      FileIoProvider fileIoProvider, Configuration conf, DF usage)
+      throws IOException {
 
     if (sd.getStorageLocation() == null) {
       throw new IOException("StorageLocation specified for storage directory " +
@@ -157,23 +162,20 @@ public class FsVolumeImpl implements FsVolumeSpi {
     this.storageLocation = sd.getStorageLocation();
     this.currentDir = sd.getCurrentDir();
     this.storageType = storageLocation.getStorageType();
-    this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
-        + "." + StringUtils.toLowerCase(storageType.toString()), conf.getLong(
-        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
-        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
     this.configuredCapacity = -1;
+    this.usage = usage;
     if (currentDir != null) {
       File parent = currentDir.getParentFile();
-      this.usage = new DF(parent, conf);
       cacheExecutor = initializeCacheExecutor(parent);
       this.metrics = DataNodeVolumeMetrics.create(conf, parent.getPath());
     } else {
-      this.usage = null;
       cacheExecutor = null;
       this.metrics = null;
     }
     this.conf = conf;
     this.fileIoProvider = fileIoProvider;
+    this.reserved = new ReservedSpaceCalculator.Builder(conf)
+        .setUsage(usage).setStorageType(storageType).build();
   }
 
   protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
@@ -399,7 +401,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   @VisibleForTesting
   public long getCapacity() {
     if (configuredCapacity < 0) {
-      long remaining = usage.getCapacity() - reserved;
+      long remaining = usage.getCapacity() - getReserved();
       return remaining > 0 ? remaining : 0;
     }
 
@@ -439,8 +441,9 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   private long getRemainingReserved() throws IOException {
     long actualNonDfsUsed = getActualNonDfsUsed();
-    if (actualNonDfsUsed < reserved) {
-      return reserved - actualNonDfsUsed;
+    long actualReserved = getReserved();
+    if (actualNonDfsUsed < actualReserved) {
+      return actualReserved - actualNonDfsUsed;
     }
     return 0L;
   }
@@ -454,10 +457,11 @@ public class FsVolumeImpl implements FsVolumeSpi {
    */
   public long getNonDfsUsed() throws IOException {
     long actualNonDfsUsed = getActualNonDfsUsed();
-    if (actualNonDfsUsed < reserved) {
+    long actualReserved = getReserved();
+    if (actualNonDfsUsed < actualReserved) {
       return 0L;
     }
-    return actualNonDfsUsed - reserved;
+    return actualNonDfsUsed - actualReserved;
   }
 
   @VisibleForTesting
@@ -476,7 +480,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
 
   long getReserved(){
-    return reserved;
+    return reserved.getReserved();
   }
 
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc074a35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java
index 2da9170..50ab97b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java
@@ -19,7 +19,9 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import java.io.IOException;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
@@ -34,12 +36,14 @@ public class FsVolumeImplBuilder {
   private StorageDirectory sd;
   private Configuration conf;
   private FileIoProvider fileIoProvider;
+  private DF usage;
 
   public FsVolumeImplBuilder() {
     dataset = null;
     storageID = null;
     sd = null;
     conf = null;
+    usage = null;
   }
 
   FsVolumeImplBuilder setDataset(FsDatasetImpl dataset) {
@@ -67,15 +71,25 @@ public class FsVolumeImplBuilder {
     return this;
   }
 
+  @VisibleForTesting
+  FsVolumeImplBuilder setUsage(DF newUsage) {
+    this.usage = newUsage;
+    return this;
+  }
+
   FsVolumeImpl build() throws IOException {
     if (sd.getStorageLocation().getStorageType() == StorageType.PROVIDED) {
       return new ProvidedVolumeImpl(dataset, storageID, sd,
           fileIoProvider != null ? fileIoProvider :
             new FileIoProvider(null, null), conf);
     }
+    if (null == usage) {
+      // set usage unless overridden by unit tests
+      usage = new DF(sd.getCurrentDir().getParentFile(), conf);
+    }
     return new FsVolumeImpl(
         dataset, storageID, sd,
         fileIoProvider != null ? fileIoProvider :
-            new FileIoProvider(null, null), conf);
+            new FileIoProvider(null, null), conf, usage);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc074a35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
index ec1a8fd..e2d8681 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
@@ -270,7 +270,7 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
   ProvidedVolumeImpl(FsDatasetImpl dataset, String storageID,
       StorageDirectory sd, FileIoProvider fileIoProvider,
       Configuration conf) throws IOException {
-    super(dataset, storageID, sd, fileIoProvider, conf);
+    super(dataset, storageID, sd, fileIoProvider, conf, null);
     assert getStorageLocation().getStorageType() == StorageType.PROVIDED:
       "Only provided storages must use ProvidedVolume";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc074a35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReservedSpaceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReservedSpaceCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReservedSpaceCalculator.java
new file mode 100644
index 0000000..5523cfd
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReservedSpaceCalculator.java
@@ -0,0 +1,227 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.StringUtils;
+
+import java.lang.reflect.Constructor;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY;
+
+/**
+ * Used for calculating file system space reserved for non-HDFS data.
+ */
+public abstract class ReservedSpaceCalculator {
+
+  /**
+   * Used for creating instances of ReservedSpaceCalculator.
+   */
+  public static class Builder {
+
+    private final Configuration conf;
+
+    private DF usage;
+    private StorageType storageType;
+
+    public Builder(Configuration conf) {
+      this.conf = conf;
+    }
+
+    public Builder setUsage(DF newUsage) {
+      this.usage = newUsage;
+      return this;
+    }
+
+    public Builder setStorageType(
+        StorageType newStorageType) {
+      this.storageType = newStorageType;
+      return this;
+    }
+
+    ReservedSpaceCalculator build() {
+      try {
+        Class<? extends ReservedSpaceCalculator> clazz = conf.getClass(
+            DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+            DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT,
+            ReservedSpaceCalculator.class);
+
+        Constructor constructor = clazz.getConstructor(
+            Configuration.class, DF.class, StorageType.class);
+
+        return (ReservedSpaceCalculator) constructor.newInstance(
+            conf, usage, storageType);
+      } catch (Exception e) {
+        throw new IllegalStateException(
+            "Error instantiating ReservedSpaceCalculator", e);
+      }
+    }
+  }
+
+  private final DF usage;
+  private final Configuration conf;
+  private final StorageType storageType;
+
+  ReservedSpaceCalculator(Configuration conf, DF usage,
+      StorageType storageType) {
+    this.usage = usage;
+    this.conf = conf;
+    this.storageType = storageType;
+  }
+
+  DF getUsage() {
+    return usage;
+  }
+
+  long getReservedFromConf(String key, long defaultValue) {
+    return conf.getLong(key + "." + StringUtils.toLowerCase(
+        storageType.toString()), conf.getLong(key, defaultValue));
+  }
+
+  /**
+   * Return the capacity of the file system space reserved for non-HDFS.
+   *
+   * @return the number of bytes reserved for non-HDFS.
+   */
+  abstract long getReserved();
+
+
+  /**
+   * Based on absolute number of reserved bytes.
+   */
+  public static class ReservedSpaceCalculatorAbsolute extends
+      ReservedSpaceCalculator {
+
+    private final long reservedBytes;
+
+    public ReservedSpaceCalculatorAbsolute(Configuration conf, DF usage,
+        StorageType storageType) {
+      super(conf, usage, storageType);
+      this.reservedBytes = getReservedFromConf(DFS_DATANODE_DU_RESERVED_KEY,
+          DFS_DATANODE_DU_RESERVED_DEFAULT);
+    }
+
+    @Override
+    long getReserved() {
+      return reservedBytes;
+    }
+  }
+
+  /**
+   * Based on percentage of total capacity in the storage.
+   */
+  public static class ReservedSpaceCalculatorPercentage extends
+      ReservedSpaceCalculator {
+
+    private final long reservedPct;
+
+    public ReservedSpaceCalculatorPercentage(Configuration conf, DF usage,
+        StorageType storageType) {
+      super(conf, usage, storageType);
+      this.reservedPct = getReservedFromConf(
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY,
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT);
+    }
+
+    @Override
+    long getReserved() {
+      return getPercentage(getUsage().getCapacity(), reservedPct);
+    }
+  }
+
+  /**
+   * Calculates absolute and percentage based reserved space and
+   * picks the one that will yield more reserved space.
+   */
+  public static class ReservedSpaceCalculatorConservative extends
+      ReservedSpaceCalculator {
+
+    private final long reservedBytes;
+    private final long reservedPct;
+
+    public ReservedSpaceCalculatorConservative(Configuration conf, DF usage,
+        StorageType storageType) {
+      super(conf, usage, storageType);
+      this.reservedBytes = getReservedFromConf(DFS_DATANODE_DU_RESERVED_KEY,
+          DFS_DATANODE_DU_RESERVED_DEFAULT);
+      this.reservedPct = getReservedFromConf(
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY,
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT);
+    }
+
+    long getReservedBytes() {
+      return reservedBytes;
+    }
+
+    long getReservedPct() {
+      return reservedPct;
+    }
+
+    @Override
+    long getReserved() {
+      return Math.max(getReservedBytes(),
+          getPercentage(getUsage().getCapacity(), getReservedPct()));
+    }
+  }
+
+  /**
+   * Calculates absolute and percentage based reserved space and
+   * picks the one that will yield less reserved space.
+   */
+  public static class ReservedSpaceCalculatorAggressive extends
+      ReservedSpaceCalculator {
+
+    private final long reservedBytes;
+    private final long reservedPct;
+
+    public ReservedSpaceCalculatorAggressive(Configuration conf, DF usage,
+        StorageType storageType) {
+      super(conf, usage, storageType);
+      this.reservedBytes = getReservedFromConf(DFS_DATANODE_DU_RESERVED_KEY,
+          DFS_DATANODE_DU_RESERVED_DEFAULT);
+      this.reservedPct = getReservedFromConf(
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY,
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT);
+    }
+
+    long getReservedBytes() {
+      return reservedBytes;
+    }
+
+    long getReservedPct() {
+      return reservedPct;
+    }
+
+    @Override
+    long getReserved() {
+      return Math.min(getReservedBytes(),
+          getPercentage(getUsage().getCapacity(), getReservedPct()));
+    }
+  }
+
+  private static long getPercentage(long total, long percentage) {
+    return (total * percentage) / 100;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc074a35/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 921c166..c64b2f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -327,6 +327,20 @@
 </property>
 
 <property>
+  <name>dfs.datanode.du.reserved.calculator</name>
+  <value>org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator$ReservedSpaceCalculatorAbsolute</value>
+  <description>Determines the class of ReservedSpaceCalculator to be used for
+    calculating disk space reserved for non-HDFS data. The default calculator is
+    ReservedSpaceCalculatorAbsolute which will use dfs.datanode.du.reserved
+    for a static reserved number of bytes. ReservedSpaceCalculatorPercentage
+    will use dfs.datanode.du.reserved.pct to calculate the reserved number
+    of bytes based on the size of the storage. ReservedSpaceCalculatorConservative and
+    ReservedSpaceCalculatorAggressive combine both values: Conservative takes the
+    maximum, Aggressive the minimum. For more details see ReservedSpaceCalculator.
+  </description>
+</property>
+
+<property>
   <name>dfs.datanode.du.reserved</name>
   <value>0</value>
   <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
@@ -339,6 +353,20 @@
 </property>
 
 <property>
+  <name>dfs.datanode.du.reserved.pct</name>
+  <value>0</value>
+  <description>Reserved space in percentage. Read dfs.datanode.du.reserved.calculator to see
+    when this takes effect. The actual number of bytes reserved will be calculated by using the
+    total capacity of the data directory in question. Specific storage type based reservation
+    is also supported. The property can be followed with corresponding storage types
+    ([ssd]/[disk]/[archive]/[ram_disk]) for cluster with heterogeneous storage.
+    For example, reserved percentage space for RAM_DISK storage can be configured using property
+    'dfs.datanode.du.reserved.pct.ram_disk'. If specific storage type reservation is not configured
+    then dfs.datanode.du.reserved.pct will be used.
+  </description>
+</property>
+
+<property>
   <name>dfs.namenode.name.dir</name>
   <value>file://${hadoop.tmp.dir}/dfs/name</value>
   <description>Determines where on the local filesystem the DFS name node
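
The new keys described above can also be set programmatically; the unit tests in
this commit configure them in exactly this way. A minimal sketch, selecting the
percentage-based calculator with a 15% default and a RAM_DISK-specific override
(the example class name is illustrative only):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator;

public class ReservedSpacePctExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Pick the percentage-based calculator instead of the absolute default.
    conf.setClass(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
        ReservedSpaceCalculator.ReservedSpaceCalculatorPercentage.class,
        ReservedSpaceCalculator.class);
    // Reserve 15% of each volume's capacity for non-HDFS data ...
    conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 15);
    // ... but only 10% on RAM_DISK volumes.
    conf.setLong(
        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".ram_disk", 10);
  }
}
```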

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc074a35/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
index ee3a79f..f53c21c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -40,15 +41,18 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 public class TestFsVolumeList {
 
-  private final Configuration conf = new Configuration();
+  private Configuration conf;
   private VolumeChoosingPolicy<FsVolumeImpl> blockChooser =
       new RoundRobinVolumeChoosingPolicy<>();
   private FsDatasetImpl dataset = null;
@@ -63,6 +67,7 @@ public class TestFsVolumeList {
     blockScannerConf.setInt(DFSConfigKeys.
         DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
     blockScanner = new BlockScanner(null, blockScannerConf);
+    conf = new Configuration();
   }
 
   @Test(timeout=30000)
@@ -227,4 +232,87 @@ public class TestFsVolumeList {
         actualNonDfsUsage - duReserved;
     assertEquals(expectedNonDfsUsage, spyVolume.getNonDfsUsed());
   }
+
+  @Test
+  public void testDfsReservedPercentageForDifferentStorageTypes()
+      throws IOException {
+    conf.setClass(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculator.ReservedSpaceCalculatorPercentage.class,
+        ReservedSpaceCalculator.class);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 15);
+
+    File volDir = new File(baseDir, "volume-0");
+    volDir.mkdirs();
+
+    DF usage = mock(DF.class);
+    when(usage.getCapacity()).thenReturn(4000L);
+    when(usage.getAvailable()).thenReturn(1000L);
+
+    // when storage type reserved is not configured, should consider
+    // dfs.datanode.du.reserved.pct
+    FsVolumeImpl volume = new FsVolumeImplBuilder()
+        .setConf(conf)
+        .setDataset(dataset)
+        .setStorageID("storage-id")
+        .setStorageDirectory(
+            new StorageDirectory(StorageLocation.parse(
+                "[RAM_DISK]" + volDir.getPath())))
+        .setUsage(usage)
+        .build();
+
+    assertEquals(600, volume.getReserved());
+    assertEquals(3400, volume.getCapacity());
+    assertEquals(400, volume.getAvailable());
+
+    // when storage type reserved is configured.
+    conf.setLong(
+        DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + "."
+            + StringUtils.toLowerCase(StorageType.RAM_DISK.toString()), 10);
+    conf.setLong(
+        DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + "."
+            + StringUtils.toLowerCase(StorageType.SSD.toString()), 50);
+    FsVolumeImpl volume1 = new FsVolumeImplBuilder()
+        .setConf(conf)
+        .setDataset(dataset)
+        .setStorageID("storage-id")
+        .setStorageDirectory(
+            new StorageDirectory(StorageLocation.parse(
+                "[RAM_DISK]" + volDir.getPath())))
+        .setUsage(usage)
+        .build();
+    assertEquals(400, volume1.getReserved());
+    assertEquals(3600, volume1.getCapacity());
+    assertEquals(600, volume1.getAvailable());
+    FsVolumeImpl volume2 = new FsVolumeImplBuilder()
+        .setConf(conf)
+        .setDataset(dataset)
+        .setStorageID("storage-id")
+        .setStorageDirectory(
+            new StorageDirectory(StorageLocation.parse(
+                "[SSD]" + volDir.getPath())))
+        .setUsage(usage)
+        .build();
+    assertEquals(2000, volume2.getReserved());
+    assertEquals(2000, volume2.getCapacity());
+    assertEquals(0, volume2.getAvailable());
+    FsVolumeImpl volume3 = new FsVolumeImplBuilder()
+        .setConf(conf)
+        .setDataset(dataset)
+        .setStorageID("storage-id")
+        .setStorageDirectory(
+            new StorageDirectory(StorageLocation.parse(
+                "[DISK]" + volDir.getPath())))
+        .setUsage(usage)
+        .build();
+    assertEquals(600, volume3.getReserved());
+    FsVolumeImpl volume4 = new FsVolumeImplBuilder()
+        .setConf(conf)
+        .setDataset(dataset)
+        .setStorageID("storage-id")
+        .setStorageDirectory(
+            new StorageDirectory(StorageLocation.parse(volDir.getPath())))
+        .setUsage(usage)
+        .build();
+    assertEquals(600, volume4.getReserved());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc074a35/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java
new file mode 100644
index 0000000..e04a239
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java
@@ -0,0 +1,171 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
+import org.apache.hadoop.fs.StorageType;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY;
+import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorAbsolute;
+import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorAggressive;
+import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorConservative;
+import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorPercentage;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.when;
+
+/**
+ * Unit testing for different types of ReservedSpace calculators.
+ */
+public class TestReservedSpaceCalculator {
+
+  private Configuration conf;
+  private DF usage;
+  private ReservedSpaceCalculator reserved;
+
+  @Before
+  public void setUp() {
+    conf = new Configuration();
+    usage = Mockito.mock(DF.class);
+  }
+
+  @Test
+  public void testReservedSpaceAbsolute() {
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorAbsolute.class,
+        ReservedSpaceCalculator.class);
+
+    // Test both using global configuration
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY, 900);
+
+    checkReserved(StorageType.DISK, 10000, 900);
+    checkReserved(StorageType.SSD, 10000, 900);
+    checkReserved(StorageType.ARCHIVE, 10000, 900);
+  }
+
+  @Test
+  public void testReservedSpaceAbsolutePerStorageType() {
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorAbsolute.class,
+        ReservedSpaceCalculator.class);
+
+    // Test DISK
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".disk", 500);
+    checkReserved(StorageType.DISK, 2300, 500);
+
+    // Test SSD
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".ssd", 750);
+    checkReserved(StorageType.SSD, 1550, 750);
+  }
+
+  @Test
+  public void testReservedSpacePercentage() {
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorPercentage.class,
+        ReservedSpaceCalculator.class);
+
+    // Test both using global configuration
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 10);
+    checkReserved(StorageType.DISK, 10000, 1000);
+    checkReserved(StorageType.SSD, 10000, 1000);
+    checkReserved(StorageType.ARCHIVE, 10000, 1000);
+
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 50);
+    checkReserved(StorageType.DISK, 4000, 2000);
+    checkReserved(StorageType.SSD, 4000, 2000);
+    checkReserved(StorageType.ARCHIVE, 4000, 2000);
+  }
+
+  @Test
+  public void testReservedSpacePercentagePerStorageType() {
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorPercentage.class,
+        ReservedSpaceCalculator.class);
+
+    // Test DISK
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".disk", 20);
+    checkReserved(StorageType.DISK, 1600, 320);
+
+    // Test SSD
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".ssd", 50);
+    checkReserved(StorageType.SSD, 8001, 4000);
+  }
+
+  @Test
+  public void testReservedSpaceConservativePerStorageType() {
+    // This policy should take the maximum of the two
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorConservative.class,
+        ReservedSpaceCalculator.class);
+
+    // Test DISK + taking the reserved bytes over percentage,
+    // as that gives more reserved space
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".disk", 800);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".disk", 20);
+    checkReserved(StorageType.DISK, 1600, 800);
+
+    // Test ARCHIVE + taking reserved space based on the percentage,
+    // as that gives more reserved space
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".archive", 1300);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".archive", 50);
+    checkReserved(StorageType.ARCHIVE, 6200, 3100);
+  }
+
+  @Test
+  public void testReservedSpaceAggressivePerStorageType() {
+    // This policy should take the minimum of the two
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorAggressive.class,
+        ReservedSpaceCalculator.class);
+
+    // Test RAM_DISK + taking the reserved bytes over percentage,
+    // as that gives less reserved space
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".ram_disk", 100);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".ram_disk", 10);
+    checkReserved(StorageType.RAM_DISK, 1600, 100);
+
+    // Test ARCHIVE + taking reserved space based on the percentage,
+    // as that gives less reserved space
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".archive", 20000);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".archive", 5);
+    checkReserved(StorageType.ARCHIVE, 100000, 5000);
+  }
+
+  @Test(expected = IllegalStateException.class)
+  public void testInvalidCalculator() {
+    conf.set(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY, "INVALIDTYPE");
+    reserved = new ReservedSpaceCalculator.Builder(conf)
+        .setUsage(usage)
+        .setStorageType(StorageType.DISK)
+        .build();
+  }
+
+  private void checkReserved(StorageType storageType,
+      long totalCapacity, long reservedExpected) {
+    when(usage.getCapacity()).thenReturn(totalCapacity);
+
+    reserved = new ReservedSpaceCalculator.Builder(conf).setUsage(usage)
+        .setStorageType(storageType).build();
+    assertEquals(reservedExpected, reserved.getReserved());
+  }
+}
\ No newline at end of file
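
For reference, a minimal sketch of how the calculator exercised by these tests would be wired up outside of a test, using the same Builder API and configuration constants the diff above exercises. The java.io.File import, the volume path and the byte figures are illustrative assumptions, not part of the patch.

    Configuration conf = new Configuration();
    // Conservative policy: reserve the larger of 1 GiB or 10% of capacity on DISK volumes.
    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
        ReservedSpaceCalculatorConservative.class, ReservedSpaceCalculator.class);
    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".disk", 1024L * 1024 * 1024);
    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".disk", 10);

    // DF(File, Configuration) throws IOException; error handling omitted in this sketch.
    DF usage = new DF(new File("/data/1"), conf);  // hypothetical DataNode volume
    long reservedBytes = new ReservedSpaceCalculator.Builder(conf)
        .setUsage(usage)
        .setStorageType(StorageType.DISK)
        .build()
        .getReserved();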




[16/50] [abbrv] hadoop git commit: YARN-8212. Pending backlog for async allocation threads should be configurable. Contributed by Tao Yang.

Posted by xk...@apache.org.
YARN-8212. Pending backlog for async allocation threads should be configurable. Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d319e37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d319e37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d319e37

Branch: refs/heads/HDFS-12943
Commit: 2d319e37937c1e20c6a7dc4477ef88defd1f8464
Parents: a966ec6
Author: Weiwei Yang <ww...@apache.org>
Authored: Tue May 1 09:47:10 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Tue May 1 09:47:10 2018 +0800

----------------------------------------------------------------------
 .../scheduler/capacity/CapacityScheduler.java               | 9 ++++++++-
 .../scheduler/capacity/CapacitySchedulerConfiguration.java  | 8 ++++++++
 2 files changed, 16 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d319e37/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 776e512..1d6c104 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -261,6 +261,7 @@ public class CapacityScheduler extends
       CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX
           + ".scheduling-interval-ms";
   private static final long DEFAULT_ASYNC_SCHEDULER_INTERVAL = 5;
+  private long asyncMaxPendingBacklogs;
 
   public CapacityScheduler() {
     super(CapacityScheduler.class.getName());
@@ -379,6 +380,11 @@ public class CapacityScheduler extends
           asyncSchedulerThreads.add(new AsyncScheduleThread(this));
         }
         resourceCommitterService = new ResourceCommitterService(this);
+        asyncMaxPendingBacklogs = this.conf.getInt(
+            CapacitySchedulerConfiguration.
+                SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS,
+            CapacitySchedulerConfiguration.
+                DEFAULT_SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS);
       }
 
       // Setup how many containers we can allocate for each round
@@ -573,7 +579,8 @@ public class CapacityScheduler extends
             Thread.sleep(100);
           } else {
             // Don't run schedule if we have some pending backlogs already
-            if (cs.getAsyncSchedulingPendingBacklogs() > 100) {
+            if (cs.getAsyncSchedulingPendingBacklogs()
+                > cs.asyncMaxPendingBacklogs) {
               Thread.sleep(1);
             } else{
               schedule(cs);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d319e37/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index c41bd96..76eaac0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -246,6 +246,14 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
       SCHEDULE_ASYNCHRONOUSLY_PREFIX + ".maximum-threads";
 
   @Private
+  public static final String SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS =
+      SCHEDULE_ASYNCHRONOUSLY_PREFIX + ".maximum-pending-backlogs";
+
+  @Private
+  public static final Integer
+      DEFAULT_SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS = 100;
+
+  @Private
   public static final boolean DEFAULT_SCHEDULE_ASYNCHRONOUSLY_ENABLE = false;
 
   @Private
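
As a quick illustration (not part of the patch), the new limit can be set and read through the constants added above; on a real cluster the property would normally go into capacity-scheduler.xml, its concrete name being whatever SCHEDULE_ASYNCHRONOUSLY_PREFIX + ".maximum-pending-backlogs" expands to.

    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    // Let async scheduling threads keep working until 200 commit requests are pending
    // (the shipped default stays at 100, matching the previously hard-coded value).
    csConf.setInt(
        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS,
        200);
    int maxBacklogs = csConf.getInt(
        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS,
        CapacitySchedulerConfiguration.DEFAULT_SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS);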




[41/50] [abbrv] hadoop git commit: YARN-7961. Improve status message for YARN service. Contributed by Gour Saha

Posted by xk...@apache.org.
YARN-7961. Improve status message for YARN service.
           Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fe3214d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fe3214d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fe3214d

Branch: refs/heads/HDFS-12943
Commit: 7fe3214d4bb810c0da18dd936875b4e2588ba518
Parents: ee2ce92
Author: Eric Yang <ey...@apache.org>
Authored: Thu May 3 13:27:07 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Thu May 3 13:27:07 2018 -0400

----------------------------------------------------------------------
 .../yarn/service/client/ApiServiceClient.java    |  7 +++++++
 .../hadoop/yarn/service/webapp/ApiServer.java    | 10 +++++++---
 .../hadoop/yarn/service/ServiceClientTest.java   |  7 ++++---
 .../hadoop/yarn/service/TestApiServer.java       | 15 ++++++++++++++-
 .../service/client/TestApiServiceClient.java     | 19 ++++++++++++++++++-
 5 files changed, 50 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe3214d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
index cdba555..757e664 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
@@ -479,6 +479,13 @@ public class ApiServiceClient extends AppAdminClient {
     try {
       ClientResponse response = getApiClient(getServicePath(appName))
           .get(ClientResponse.class);
+      if (response.getStatus() == 404) {
+        StringBuilder sb = new StringBuilder();
+        sb.append(" Service ");
+        sb.append(appName);
+        sb.append(" not found");
+        return sb.toString();
+      }
       if (response.getStatus() != 200) {
         StringBuilder sb = new StringBuilder();
         sb.append(appName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe3214d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index 9a30fcf..8c7c0ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -186,7 +186,7 @@ public class ApiServer {
     ServiceStatus serviceStatus = new ServiceStatus();
     try {
       if (appName == null) {
-        throw new IllegalArgumentException("Service name can not be null.");
+        throw new IllegalArgumentException("Service name cannot be null.");
       }
       UserGroupInformation ugi = getProxyUser(request);
       LOG.info("GET: getService for appName = {} user = {}", appName, ugi);
@@ -194,12 +194,16 @@ public class ApiServer {
       return Response.ok(app).build();
     } catch (AccessControlException e) {
       return formatResponse(Status.FORBIDDEN, e.getMessage());
-    } catch (IllegalArgumentException |
-        FileNotFoundException e) {
+    } catch (IllegalArgumentException e) {
       serviceStatus.setDiagnostics(e.getMessage());
       serviceStatus.setCode(ERROR_CODE_APP_NAME_INVALID);
       return Response.status(Status.NOT_FOUND).entity(serviceStatus)
           .build();
+    } catch (FileNotFoundException e) {
+      serviceStatus.setDiagnostics("Service " + appName + " not found");
+      serviceStatus.setCode(ERROR_CODE_APP_NAME_INVALID);
+      return Response.status(Status.NOT_FOUND).entity(serviceStatus)
+          .build();
     } catch (IOException | InterruptedException e) {
       LOG.error("Get service failed: {}", e);
       return formatResponse(Status.INTERNAL_SERVER_ERROR, e.getMessage());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe3214d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
index cff3e39..73a322c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.service.client.ServiceClient;
 import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
 import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -81,11 +82,11 @@ public class ServiceClientTest extends ServiceClient {
   }
 
   @Override
-  public Service getStatus(String appName) {
-    if (appName != null && appName.equals("jenkins")) {
+  public Service getStatus(String appName) throws FileNotFoundException {
+    if ("jenkins".equals(appName)) {
       return goodServiceStatus;
     } else {
-      throw new IllegalArgumentException();
+      throw new FileNotFoundException("Service " + appName + " not found");
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe3214d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
index 85c3cd4..38aeb59 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.service.api.records.Resource;
 import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.api.records.ServiceStatus;
+import org.apache.hadoop.yarn.service.conf.RestApiConstants;
 import org.apache.hadoop.yarn.service.webapp.ApiServer;
 import org.junit.After;
 import org.junit.Before;
@@ -151,10 +152,17 @@ public class TestApiServer {
 
   @Test
   public void testBadGetService() {
-    final Response actual = apiServer.getService(request, "no-jenkins");
+    final String serviceName = "nonexistent-jenkins";
+    final Response actual = apiServer.getService(request, serviceName);
     assertEquals("Get service is ",
         Response.status(Status.NOT_FOUND).build().getStatus(),
         actual.getStatus());
+    ServiceStatus serviceStatus = (ServiceStatus) actual.getEntity();
+    assertEquals("Response code don't match",
+        RestApiConstants.ERROR_CODE_APP_NAME_INVALID, serviceStatus.getCode());
+    assertEquals("Response diagnostics don't match",
+        "Service " + serviceName + " not found",
+        serviceStatus.getDiagnostics());
   }
 
   @Test
@@ -163,6 +171,11 @@ public class TestApiServer {
     assertEquals("Get service is ",
         Response.status(Status.NOT_FOUND).build().getStatus(),
         actual.getStatus());
+    ServiceStatus serviceStatus = (ServiceStatus) actual.getEntity();
+    assertEquals("Response code don't match",
+        RestApiConstants.ERROR_CODE_APP_NAME_INVALID, serviceStatus.getCode());
+    assertEquals("Response diagnostics don't match",
+        "Service name cannot be null.", serviceStatus.getDiagnostics());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe3214d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java
index a245144..fd31570 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java
@@ -59,7 +59,12 @@ public class TestApiServiceClient {
     protected void doGet(HttpServletRequest req, HttpServletResponse resp)
         throws ServletException, IOException {
       System.out.println("Get was called");
-      resp.setStatus(HttpServletResponse.SC_OK);
+      if (req.getPathInfo() != null
+          && req.getPathInfo().contains("nonexistent-app")) {
+        resp.setStatus(HttpServletResponse.SC_NOT_FOUND);
+      } else {
+        resp.setStatus(HttpServletResponse.SC_OK);
+      }
     }
 
     @Override
@@ -140,6 +145,18 @@ public class TestApiServiceClient {
   }
 
   @Test
+  public void testStatus() {
+    String appName = "nonexistent-app";
+    try {
+      String result = asc.getStatusString(appName);
+      assertEquals("Status reponse don't match",
+          " Service " + appName + " not found", result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
   public void testStop() {
     String appName = "example-app";
     try {
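
One caller-facing consequence of this change, sketched below (not part of the patch): getStatus, as mirrored by ServiceClientTest and the ApiServer handling above, now signals an unknown service with FileNotFoundException rather than IllegalArgumentException, so code built on top of ServiceClient would handle it along these lines. The serviceClient instance and the service name are assumptions.

    try {
      Service status = serviceClient.getStatus("my-service");
      System.out.println("state: " + status.getState());
    } catch (FileNotFoundException e) {
      // the new contract: unknown service names surface as "Service <name> not found"
      System.err.println(e.getMessage());
    } catch (Exception e) {
      // other client-side failures (IO, YARN errors)
      throw new RuntimeException(e);
    }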




[12/50] [abbrv] hadoop git commit: YARN-8195. Fix constraint cardinality check in the presence of multiple target allocation tags. Contributed by Weiwei Yang.

Posted by xk...@apache.org.
YARN-8195. Fix constraint cardinality check in the presence of multiple target allocation tags. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b095554
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b095554
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b095554

Branch: refs/heads/HDFS-12943
Commit: 9b0955545174abe16fd81240db30f175145ee89b
Parents: 3d43474
Author: Konstantinos Karanasos <kk...@apache.org>
Authored: Mon Apr 30 11:54:30 2018 -0700
Committer: Konstantinos Karanasos <kk...@apache.org>
Committed: Mon Apr 30 11:54:30 2018 -0700

----------------------------------------------------------------------
 .../constraint/PlacementConstraintsUtil.java    |  8 +-
 .../TestPlacementConstraintsUtil.java           | 88 ++++++++++++++++++++
 2 files changed, 92 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b095554/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
index efa7b65..f47e1d4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -91,20 +91,20 @@ public final class PlacementConstraintsUtil {
     if (sc.getScope().equals(PlacementConstraints.NODE)) {
       if (checkMinCardinality) {
         minScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(),
-            allocationTags, Long::max);
+            allocationTags, Long::min);
       }
       if (checkMaxCardinality) {
         maxScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(),
-            allocationTags, Long::min);
+            allocationTags, Long::max);
       }
     } else if (sc.getScope().equals(PlacementConstraints.RACK)) {
       if (checkMinCardinality) {
         minScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(),
-            allocationTags, Long::max);
+            allocationTags, Long::min);
       }
       if (checkMaxCardinality) {
         maxScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(),
-            allocationTags, Long::min);
+            allocationTags, Long::max);
       }
     }
 

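The swap of Long::max and Long::min above is the heart of the fix: with several target tags, an affinity (minimum-cardinality) check has to look at the least-populated tag, and an anti-affinity (maximum-cardinality) check at the most-populated one. A standalone sketch of that reasoning, with made-up counts:

    import java.util.HashMap;
    import java.util.Map;

    public class CardinalitySketch {
      public static void main(String[] args) {
        // A node holding one container tagged "A" and none tagged "B".
        Map<String, Long> countsOnNode = new HashMap<>();
        countsOnNode.put("A", 1L);
        countsOnNode.put("B", 0L);

        long minAcrossTags = countsOnNode.values().stream().reduce(Long.MAX_VALUE, Long::min);
        long maxAcrossTags = countsOnNode.values().stream().reduce(0L, Long::max);

        // targetIn(NODE, allocationTag("A", "B")): every tag must be present,
        // so the minimum is compared -> fails here because "B" is absent.
        System.out.println("affinity satisfied:      " + (minAcrossTags >= 1));
        // targetNotIn(NODE, allocationTag("A", "B")): no tag may be present,
        // so the maximum is compared -> fails here because "A" is present.
        System.out.println("anti-affinity satisfied: " + (maxAcrossTags == 0));
      }
    }
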
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b095554/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
index 3248450..dc61981 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
@@ -42,6 +42,7 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import java.util.concurrent.atomic.AtomicLong;
+import com.google.common.collect.ImmutableMap;
 
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -228,6 +229,93 @@ public class TestPlacementConstraintsUtil {
   }
 
   @Test
+  public void testMultiTagsPlacementConstraints()
+      throws InvalidAllocationTagsQueryException {
+    PlacementConstraintManagerService pcm =
+        new MemoryPlacementConstraintManager();
+    AllocationTagsManager tm = new AllocationTagsManager(rmContext);
+    rmContext.setAllocationTagsManager(tm);
+    rmContext.setPlacementConstraintManager(pcm);
+
+    HashSet<String> st1 = new HashSet<>(Arrays.asList("X"));
+    HashSet<String> st2 = new HashSet<>(Arrays.asList("Y"));
+
+    // X anti-affinity with A and B
+    PlacementConstraint pc1 = PlacementConstraints.build(
+        targetNotIn(NODE, allocationTag("A", "B")));
+    // Y affinity with A and B
+    PlacementConstraint pc2 = PlacementConstraints.build(
+        targetIn(NODE, allocationTag("A", "B")));
+    Map<Set<String>, PlacementConstraint> constraintMap =
+        ImmutableMap.of(st1, pc1, st2, pc2);
+    // Register App1 with affinity constraint map
+    pcm.registerApplication(appId1, constraintMap);
+
+    /**
+     * Now place container:
+     * n0: A(1)
+     * n1: B(1)
+     * n2:
+     * n3:
+     */
+    RMNode n0_r1 = rmNodes.get(0);
+    RMNode n1_r1 = rmNodes.get(1);
+    RMNode n2_r2 = rmNodes.get(2);
+    RMNode n3_r2 = rmNodes.get(3);
+    SchedulerNode schedulerNode0 = newSchedulerNode(n0_r1.getHostName(),
+        n0_r1.getRackName(), n0_r1.getNodeID());
+    SchedulerNode schedulerNode1 = newSchedulerNode(n1_r1.getHostName(),
+        n1_r1.getRackName(), n1_r1.getNodeID());
+    SchedulerNode schedulerNode2 = newSchedulerNode(n2_r2.getHostName(),
+        n2_r2.getRackName(), n2_r2.getNodeID());
+    SchedulerNode schedulerNode3 = newSchedulerNode(n3_r2.getHostName(),
+        n3_r2.getRackName(), n3_r2.getNodeID());
+
+    ContainerId ca = ContainerId
+        .newContainerId(ApplicationAttemptId.newInstance(appId1, 0), 0);
+    tm.addContainer(n0_r1.getNodeID(), ca, ImmutableSet.of("A"));
+
+    ContainerId cb = ContainerId
+        .newContainerId(ApplicationAttemptId.newInstance(appId1, 0), 0);
+    tm.addContainer(n1_r1.getNodeID(), cb, ImmutableSet.of("B"));
+
+    // n0 and n1 has A/B so they cannot satisfy the PC
+    // n2 and n3 doesn't have A or B, so they can satisfy the PC
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(st1), schedulerNode0, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(st1), schedulerNode1, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(st1), schedulerNode2, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(st1), schedulerNode3, pcm, tm));
+
+    /**
+     * Now place container:
+     * n0: A(1)
+     * n1: B(1)
+     * n2: A(1), B(1)
+     * n3:
+     */
+    ContainerId ca1 = ContainerId
+        .newContainerId(ApplicationAttemptId.newInstance(appId1, 0), 0);
+    tm.addContainer(n2_r2.getNodeID(), ca1, ImmutableSet.of("A"));
+    ContainerId cb1 = ContainerId
+        .newContainerId(ApplicationAttemptId.newInstance(appId1, 0), 0);
+    tm.addContainer(n2_r2.getNodeID(), cb1, ImmutableSet.of("B"));
+
+    // Only n2 has both A and B so only it can satisfy the PC
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(st2), schedulerNode0, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(st2), schedulerNode1, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(st2), schedulerNode2, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(st2), schedulerNode3, pcm, tm));
+  }
+
+  @Test
   public void testRackAffinityAssignment()
       throws InvalidAllocationTagsQueryException {
     PlacementConstraintManagerService pcm =




[17/50] [abbrv] hadoop git commit: HDDS-13. Refactor StorageContainerManager into seperate RPC endpoints. Contributed by Anu Engineer.

Posted by xk...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
new file mode 100644
index 0000000..e42b887
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -0,0 +1,350 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
+ * information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache
+ * License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a
+ * copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.server;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.BlockingService;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SendContainerReportProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMReregisterCmdResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
+
+
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.versionCommand;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.registeredCommand;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.sendContainerReport;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.reregisterCommand;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.deleteBlocksCommand;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.closeContainerCommand;
+
+
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
+import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
+import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
+import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
+import org.apache.hadoop.ozone.protocolPB
+    .StorageContainerDatanodeProtocolServerSideTranslatorPB;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY;
+
+import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.startRpcServer;
+import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
+
+/**
+ * Protocol Handler for Datanode Protocol.
+ */
+public class SCMDatanodeProtocolServer implements
+    StorageContainerDatanodeProtocol {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      SCMDatanodeProtocolServer.class);
+
+  /**
+   * The RPC server that listens to requests from DataNodes.
+   */
+  private final RPC.Server datanodeRpcServer;
+
+  private final StorageContainerManager scm;
+  private final InetSocketAddress datanodeRpcAddress;
+
+  public SCMDatanodeProtocolServer(final OzoneConfiguration conf,
+      StorageContainerManager scm)  throws IOException {
+
+    Preconditions.checkNotNull(scm, "SCM cannot be null");
+    this.scm = scm;
+    final int handlerCount =
+        conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY,
+            OZONE_SCM_HANDLER_COUNT_DEFAULT);
+
+    RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
+        ProtobufRpcEngine.class);
+    BlockingService dnProtoPbService =
+        StorageContainerDatanodeProtocolProtos
+            .StorageContainerDatanodeProtocolService
+            .newReflectiveBlockingService(
+                new StorageContainerDatanodeProtocolServerSideTranslatorPB(
+                    this));
+
+    InetSocketAddress datanodeRpcAddr =
+        HddsServerUtil.getScmDataNodeBindAddress(conf);
+
+    datanodeRpcServer =
+        startRpcServer(
+            conf,
+            datanodeRpcAddr,
+            StorageContainerDatanodeProtocolPB.class,
+            dnProtoPbService,
+            handlerCount);
+
+    datanodeRpcAddress =
+        updateRPCListenAddress(
+            conf, OZONE_SCM_DATANODE_ADDRESS_KEY, datanodeRpcAddr,
+            datanodeRpcServer);
+  }
+
+  public InetSocketAddress getDatanodeRpcAddress() {
+    return datanodeRpcAddress;
+  }
+
+  public RPC.Server getDatanodeRpcServer() {
+    return datanodeRpcServer;
+  }
+
+  @Override
+  public SCMVersionResponseProto getVersion(SCMVersionRequestProto
+      versionRequest)
+      throws IOException {
+    return scm.getScmNodeManager().getVersion(versionRequest)
+        .getProtobufMessage();
+  }
+
+  @Override
+  public SCMHeartbeatResponseProto sendHeartbeat(
+      HddsProtos.DatanodeDetailsProto datanodeDetails,
+      StorageContainerDatanodeProtocolProtos.SCMNodeReport nodeReport,
+      StorageContainerDatanodeProtocolProtos.ReportState reportState)
+      throws IOException {
+    List<SCMCommand> commands =
+        scm.getScmNodeManager().sendHeartbeat(datanodeDetails, nodeReport,
+            reportState);
+    List<SCMCommandResponseProto> cmdResponses = new LinkedList<>();
+    for (SCMCommand cmd : commands) {
+      cmdResponses.add(getCommandResponse(cmd, datanodeDetails.getUuid()));
+    }
+    return SCMHeartbeatResponseProto.newBuilder()
+        .addAllCommands(cmdResponses).build();
+  }
+
+  @Override
+  public SCMRegisteredCmdResponseProto register(
+      HddsProtos.DatanodeDetailsProto datanodeDetails, String[] scmAddresses)
+      throws IOException {
+    // TODO : Return the list of Nodes that forms the SCM HA.
+    return getRegisteredResponse(scm.getScmNodeManager()
+        .register(datanodeDetails), null);
+  }
+
+  @VisibleForTesting
+  public static SCMRegisteredCmdResponseProto getRegisteredResponse(
+        SCMCommand cmd,
+        StorageContainerDatanodeProtocolProtos.SCMNodeAddressList addressList) {
+    Preconditions.checkState(cmd.getClass() == RegisteredCommand.class);
+    RegisteredCommand rCmd = (RegisteredCommand) cmd;
+    SCMCmdType type = cmd.getType();
+    if (type != SCMCmdType.registeredCommand) {
+      throw new IllegalArgumentException(
+          "Registered command is not well " + "formed. Internal Error.");
+    }
+    return SCMRegisteredCmdResponseProto.newBuilder()
+        // TODO : Fix this later when we have multiple SCM support.
+        // .setAddressList(addressList)
+        .setErrorCode(rCmd.getError())
+        .setClusterID(rCmd.getClusterID())
+        .setDatanodeUUID(rCmd.getDatanodeUUID())
+        .build();
+  }
+
+  @Override
+  public ContainerReportsResponseProto sendContainerReport(
+      ContainerReportsRequestProto reports)
+      throws IOException {
+    updateContainerReportMetrics(reports);
+
+    // should we process container reports async?
+    scm.getScmContainerManager().processContainerReports(reports);
+    return ContainerReportsResponseProto.newBuilder().build();
+  }
+
+  private void updateContainerReportMetrics(
+      ContainerReportsRequestProto reports) {
+    ContainerStat newStat = null;
+    // TODO: We should update the logic once incremental container report
+    // type is supported.
+    if (reports
+        .getType() == StorageContainerDatanodeProtocolProtos
+        .ContainerReportsRequestProto.reportType.fullReport) {
+      newStat = new ContainerStat();
+      for (StorageContainerDatanodeProtocolProtos.ContainerInfo info : reports
+          .getReportsList()) {
+        newStat.add(new ContainerStat(info.getSize(), info.getUsed(),
+            info.getKeyCount(), info.getReadBytes(), info.getWriteBytes(),
+            info.getReadCount(), info.getWriteCount()));
+      }
+
+      // update container metrics
+      StorageContainerManager.getMetrics().setLastContainerStat(newStat);
+    }
+
+    // Update container stat entry, this will trigger a removal operation if it
+    // exists in cache.
+    synchronized (scm.getContainerReportCache()) {
+      String datanodeUuid = reports.getDatanodeDetails().getUuid();
+      if (datanodeUuid != null && newStat != null) {
+        scm.getContainerReportCache().put(datanodeUuid, newStat);
+        // update global view container metrics
+        StorageContainerManager.getMetrics().incrContainerStat(newStat);
+      }
+    }
+  }
+
+
+  @Override
+  public ContainerBlocksDeletionACKResponseProto sendContainerBlocksDeletionACK(
+      ContainerBlocksDeletionACKProto acks) throws IOException {
+    if (acks.getResultsCount() > 0) {
+      List<DeleteBlockTransactionResult> resultList = acks.getResultsList();
+      for (DeleteBlockTransactionResult result : resultList) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Got block deletion ACK from datanode, TXIDs={}, "
+              + "success={}", result.getTxID(), result.getSuccess());
+        }
+        if (result.getSuccess()) {
+          LOG.debug("Purging TXID={} from block deletion log",
+              result.getTxID());
+          scm.getScmBlockManager().getDeletedBlockLog()
+              .commitTransactions(Collections.singletonList(result.getTxID()));
+        } else {
+          LOG.warn("Got failed ACK for TXID={}, prepare to resend the "
+              + "TX in next interval", result.getTxID());
+        }
+      }
+    }
+    return ContainerBlocksDeletionACKResponseProto.newBuilder()
+        .getDefaultInstanceForType();
+  }
+
+  public void start() {
+    LOG.info(
+        StorageContainerManager.buildRpcServerStartMessage(
+            "RPC server for DataNodes", getDatanodeRpcAddress()));
+    getDatanodeRpcServer().start();
+  }
+
+  public void stop() {
+    try {
+      LOG.info("Stopping the RPC server for DataNodes");
+      datanodeRpcServer.stop();
+    } catch (Exception ex) {
+      LOG.error(" datanodeRpcServer stop failed.", ex);
+    }
+    IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager());
+  }
+
+  public void join() throws InterruptedException {
+    LOG.trace("Join RPC server for DataNodes");
+    datanodeRpcServer.join();
+  }
+
+  /**
+   * Returns an SCMCommandResponseProto built from the given SCM command.
+   *
+   * @param cmd - SCM command to translate
+   * @return SCMCommandResponseProto
+   * @throws IOException
+   */
+  @VisibleForTesting
+  public StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto
+      getCommandResponse(
+      SCMCommand cmd, final String datanodeID) throws IOException {
+    SCMCmdType type = cmd.getType();
+    SCMCommandResponseProto.Builder builder =
+        SCMCommandResponseProto.newBuilder().setDatanodeUUID(datanodeID);
+    switch (type) {
+    case registeredCommand:
+      return builder
+          .setCmdType(registeredCommand)
+          .setRegisteredProto(SCMRegisteredCmdResponseProto
+              .getDefaultInstance())
+          .build();
+    case versionCommand:
+      return builder
+          .setCmdType(versionCommand)
+          .setVersionProto(SCMVersionResponseProto.getDefaultInstance())
+          .build();
+    case sendContainerReport:
+      return builder
+          .setCmdType(sendContainerReport)
+          .setSendReport(SendContainerReportProto.getDefaultInstance())
+          .build();
+    case reregisterCommand:
+      return builder
+          .setCmdType(reregisterCommand)
+          .setReregisterProto(SCMReregisterCmdResponseProto
+              .getDefaultInstance())
+          .build();
+    case deleteBlocksCommand:
+      // Once SCM sends out the deletion message, increment the count.
+      // this is done here instead of when SCM receives the ACK, because
+      // DN might not be able to response the ACK for sometime. In case
+      // it times out, SCM needs to re-send the message some more times.
+      List<Long> txs =
+          ((DeleteBlocksCommand) cmd)
+              .blocksTobeDeleted()
+              .stream()
+              .map(tx -> tx.getTxID())
+              .collect(Collectors.toList());
+      scm.getScmBlockManager().getDeletedBlockLog().incrementCount(txs);
+      return builder
+          .setCmdType(deleteBlocksCommand)
+          .setDeleteBlocksProto(((DeleteBlocksCommand) cmd).getProto())
+          .build();
+    case closeContainerCommand:
+      return builder
+          .setCmdType(closeContainerCommand)
+          .setCloseContainerProto(((CloseContainerCommand) cmd).getProto())
+          .build();
+    default:
+      throw new IllegalArgumentException("Not implemented");
+    }
+  }
+}
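
For orientation, a hedged sketch (not part of the patch) of how a consumer of the heartbeat response assembled above might dispatch on the returned commands; only getters implied by the builder calls in this file (getCommandsList, getCmdType and the per-command proto getters) are assumed.

    void dispatchHeartbeatResponse(SCMHeartbeatResponseProto response) {
      for (SCMCommandResponseProto cmdProto : response.getCommandsList()) {
        switch (cmdProto.getCmdType()) {
        case reregisterCommand:
          // datanode re-sends its registration to SCM
          break;
        case deleteBlocksCommand:
          // act on cmdProto.getDeleteBlocksProto(), then ACK through
          // sendContainerBlocksDeletionACK(...) so SCM can purge the TXIDs
          break;
        case closeContainerCommand:
          // close the container named in cmdProto.getCloseContainerProto()
          break;
        default:
          // versionCommand, registeredCommand, sendContainerReport, etc.
          break;
        }
      }
    }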

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java
new file mode 100644
index 0000000..22d4d56
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.server;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.server.ServiceRuntimeInfo;
+
+import java.util.Map;
+
+/**
+ *
+ * This is the JMX management interface for scm information.
+ */
+@InterfaceAudience.Private
+public interface SCMMXBean extends ServiceRuntimeInfo {
+
+  /**
+   * Get the SCM RPC server port that used to listen to datanode requests.
+   * @return SCM datanode RPC server port
+   */
+  String getDatanodeRpcPort();
+
+  /**
+   * Get the SCM RPC server port that used to listen to client requests.
+   * @return SCM client RPC server port
+   */
+  String getClientRpcPort();
+
+  /**
+   * Get container report info that includes container IO stats of nodes.
+   * @return The datanodeUUid to report json string mapping
+   */
+  Map<String, String> getContainerReport();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorage.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorage.java
new file mode 100644
index 0000000..be6c1af
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorage.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.server;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+import org.apache.hadoop.ozone.common.Storage;
+
+import java.io.IOException;
+import java.util.Properties;
+import java.util.UUID;
+
+import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID;
+import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR;
+
+/**
+ * SCMStorage is responsible for management of the StorageDirectories used by
+ * the SCM.
+ */
+public class SCMStorage extends Storage {
+
+  /**
+   * Construct SCMStorage.
+   * @throws IOException if any directories are inaccessible.
+   */
+  public SCMStorage(OzoneConfiguration conf) throws IOException {
+    super(NodeType.SCM, getOzoneMetaDirPath(conf), STORAGE_DIR);
+  }
+
+  public void setScmId(String scmId) throws IOException {
+    if (getState() == StorageState.INITIALIZED) {
+      throw new IOException("SCM is already initialized.");
+    } else {
+      getStorageInfo().setProperty(SCM_ID, scmId);
+    }
+  }
+
+  /**
+   * Retrieves the SCM ID from the version file.
+   * @return SCM_ID
+   */
+  public String getScmId() {
+    return getStorageInfo().getProperty(SCM_ID);
+  }
+
+  @Override
+  protected Properties getNodeProperties() {
+    String scmId = getScmId();
+    if (scmId == null) {
+      scmId = UUID.randomUUID().toString();
+    }
+    Properties scmProperties = new Properties();
+    scmProperties.setProperty(SCM_ID, scmId);
+    return scmProperties;
+  }
+
+}
\ No newline at end of file
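
A short sketch of the id-handling contract this class exposes, using only the methods shown above. Writing out the version file (the INIT startup path handled elsewhere in this commit) is deliberately omitted, and the fresh-id branch is an assumption about how a caller would use setScmId.

    OzoneConfiguration conf = new OzoneConfiguration();
    SCMStorage scmStorage = new SCMStorage(conf);
    if (scmStorage.getState() != StorageState.INITIALIZED) {
      // setScmId is only legal before initialization; afterwards it throws IOException.
      scmStorage.setScmId(UUID.randomUUID().toString());
    }
    // On an already-initialized SCM this returns the id persisted in the version file.
    System.out.println("scmId = " + scmStorage.getScmId());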

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
new file mode 100644
index 0000000..af7dd3f
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -0,0 +1,722 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
+ * information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache
+ * License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a
+ * copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.server;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+import com.google.protobuf.BlockingService;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.block.BlockManager;
+import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
+import org.apache.hadoop.hdds.scm.container.ContainerMapping;
+import org.apache.hadoop.hdds.scm.container.Mapping;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
+import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.common.Storage.StorageState;
+import org.apache.hadoop.ozone.common.StorageInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.management.ObjectName;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.InetSocketAddress;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
+/**
+ * StorageContainerManager is the main entry point for the service that
+ * provides information about
+ * which SCM nodes host containers.
+ *
+ * <p>DataNodes report to StorageContainerManager using heartbeat messages.
+ * SCM allocates containers
+ * and returns a pipeline.
+ *
+ * <p>A client once it gets a pipeline (a list of datanodes) will connect to
+ * the datanodes and
+ * create a container, which then can be used to store data.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
+public final class StorageContainerManager extends ServiceRuntimeInfoImpl
+    implements SCMMXBean {
+
+  private static final Logger LOG = LoggerFactory
+      .getLogger(StorageContainerManager.class);
+  private static final String USAGE =
+      "Usage: \n ozone scm [genericOptions] "
+          + "[ "
+          + StartupOption.INIT.getName()
+          + " [ "
+          + StartupOption.CLUSTERID.getName()
+          + " <cid> ] ]\n "
+          + "ozone scm [genericOptions] [ "
+          + StartupOption.GENCLUSTERID.getName()
+          + " ]\n "
+          + "ozone scm [ "
+          + StartupOption.HELP.getName()
+          + " ]\n";
+  /**
+   * SCM metrics.
+   */
+  private static SCMMetrics metrics;
+
+  /*
+   * RPC Endpoints exposed by SCM.
+   */
+  private final SCMDatanodeProtocolServer datanodeProtocolServer;
+  private final SCMBlockProtocolServer blockProtocolServer;
+  private final SCMClientProtocolServer clientProtocolServer;
+
+  /*
+   * State Managers of SCM.
+   */
+  private final NodeManager scmNodeManager;
+  private final Mapping scmContainerManager;
+  private final BlockManager scmBlockManager;
+  private final SCMStorage scmStorage;
+  /*
+   * HTTP endpoint for JMX access.
+   */
+  private final StorageContainerManagerHttpServer httpServer;
+  /**
+   * SCM super user.
+   */
+  private final String scmUsername;
+  private final Collection<String> scmAdminUsernames;
+  /**
+   * SCM mxbean.
+   */
+  private ObjectName scmInfoBeanName;
+  /**
+   * Key = DatanodeUuid, value = ContainerStat.
+   */
+  private Cache<String, ContainerStat> containerReportCache;
+
+  /**
+   * Creates a new StorageContainerManager. Configuration will be updated
+   * with information on the
+   * actual listening addresses used for RPC servers.
+   *
+   * @param conf configuration
+   */
+  private StorageContainerManager(OzoneConfiguration conf) throws IOException {
+
+    final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
+        OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
+
+    StorageContainerManager.initMetrics();
+    initContainerReportCache(conf);
+
+    scmStorage = new SCMStorage(conf);
+    if (scmStorage.getState() != StorageState.INITIALIZED) {
+      throw new SCMException("SCM not initialized.", ResultCodes
+          .SCM_NOT_INITIALIZED);
+    }
+
+    scmNodeManager = new SCMNodeManager(conf, scmStorage.getClusterID(), this);
+    scmContainerManager = new ContainerMapping(conf, getScmNodeManager(),
+        cacheSize);
+
+    scmBlockManager =
+        new BlockManagerImpl(conf, getScmNodeManager(), scmContainerManager,
+            cacheSize);
+
+    scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys
+        .OZONE_ADMINISTRATORS);
+    scmUsername = UserGroupInformation.getCurrentUser().getUserName();
+    if (!scmAdminUsernames.contains(scmUsername)) {
+      scmAdminUsernames.add(scmUsername);
+    }
+
+    datanodeProtocolServer = new SCMDatanodeProtocolServer(conf, this);
+    blockProtocolServer = new SCMBlockProtocolServer(conf, this);
+    clientProtocolServer = new SCMClientProtocolServer(conf, this);
+    httpServer = new StorageContainerManagerHttpServer(conf);
+
+    registerMXBean();
+  }
+
+  /**
+   * Builds a message for logging startup information about an RPC server.
+   *
+   * @param description RPC server description
+   * @param addr RPC server listening address
+   * @return server startup message
+   */
+  public static String buildRpcServerStartMessage(String description,
+      InetSocketAddress addr) {
+    return addr != null
+        ? String.format("%s is listening at %s", description, addr.toString())
+        : String.format("%s not started", description);
+  }
+
+  /**
+   * Creates an RPC server for the given protocol; the caller is responsible
+   * for starting it.
+   *
+   * @param conf configuration
+   * @param addr configured address of RPC server
+   * @param protocol RPC protocol provided by RPC server
+   * @param instance RPC protocol implementation instance
+   * @param handlerCount RPC server handler count
+   * @return RPC server
+   * @throws IOException if there is an I/O error while creating RPC server
+   */
+  public static RPC.Server startRpcServer(
+      OzoneConfiguration conf,
+      InetSocketAddress addr,
+      Class<?> protocol,
+      BlockingService instance,
+      int handlerCount)
+      throws IOException {
+    RPC.Server rpcServer =
+        new RPC.Builder(conf)
+            .setProtocol(protocol)
+            .setInstance(instance)
+            .setBindAddress(addr.getHostString())
+            .setPort(addr.getPort())
+            .setNumHandlers(handlerCount)
+            .setVerbose(false)
+            .setSecretManager(null)
+            .build();
+
+    DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);
+    return rpcServer;
+  }
+
+  /**
+   * Main entry point for starting StorageContainerManager.
+   *
+   * @param argv arguments
+   * @throws IOException if startup fails due to I/O error
+   */
+  public static void main(String[] argv) throws IOException {
+    if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
+      System.exit(0);
+    }
+    try {
+      OzoneConfiguration conf = new OzoneConfiguration();
+      GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
+      if (!hParser.isParseSuccessful()) {
+        System.err.println("USAGE: " + USAGE + "\n");
+        hParser.printGenericCommandUsage(System.err);
+        System.exit(1);
+      }
+      StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
+          LOG);
+      StorageContainerManager scm = createSCM(hParser.getRemainingArgs(), conf);
+      if (scm != null) {
+        scm.start();
+        scm.join();
+      }
+    } catch (Throwable t) {
+      LOG.error("Failed to start the StorageContainerManager.", t);
+      terminate(1, t);
+    }
+  }
+
+  private static void printUsage(PrintStream out) {
+    out.println(USAGE + "\n");
+  }
+
+  public static StorageContainerManager createSCM(String[] argv,
+      OzoneConfiguration conf)
+      throws IOException {
+    if (!HddsUtils.isHddsEnabled(conf)) {
+      System.err.println(
+          "SCM cannot be started in secure mode or when " + OZONE_ENABLED + "" +
+              " is set to false");
+      System.exit(1);
+    }
+    StartupOption startOpt = parseArguments(argv);
+    if (startOpt == null) {
+      printUsage(System.err);
+      terminate(1);
+      return null;
+    }
+    switch (startOpt) {
+    case INIT:
+      terminate(scmInit(conf) ? 0 : 1);
+      return null;
+    case GENCLUSTERID:
+      System.out.println("Generating new cluster id:");
+      System.out.println(StorageInfo.newClusterID());
+      terminate(0);
+      return null;
+    case HELP:
+      printUsage(System.err);
+      terminate(0);
+      return null;
+    default:
+      return new StorageContainerManager(conf);
+    }
+  }
+
+  /**
+   * Routine to initialize the SCM storage and version info for
+   * StorageContainerManager.
+   *
+   * @param conf OzoneConfiguration
+   * @return true if SCM initialization is successful, false otherwise.
+   * @throws IOException if init fails due to I/O error
+   */
+  public static boolean scmInit(OzoneConfiguration conf) throws IOException {
+    SCMStorage scmStorage = new SCMStorage(conf);
+    StorageState state = scmStorage.getState();
+    if (state != StorageState.INITIALIZED) {
+      try {
+        String clusterId = StartupOption.INIT.getClusterId();
+        if (clusterId != null && !clusterId.isEmpty()) {
+          scmStorage.setClusterId(clusterId);
+        }
+        scmStorage.initialize();
+        System.out.println(
+            "SCM initialization succeeded."
+                + "Current cluster id for sd="
+                + scmStorage.getStorageDir()
+                + ";cid="
+                + scmStorage.getClusterID());
+        return true;
+      } catch (IOException ioe) {
+        LOG.error("Could not initialize SCM version file", ioe);
+        return false;
+      }
+    } else {
+      System.out.println(
+          "SCM already initialized. Reusing existing"
+              + " cluster id for sd="
+              + scmStorage.getStorageDir()
+              + ";cid="
+              + scmStorage.getClusterID());
+      return true;
+    }
+  }
+
+  private static StartupOption parseArguments(String[] args) {
+    int argsLen = (args == null) ? 0 : args.length;
+    StartupOption startOpt = StartupOption.HELP;
+    if (argsLen == 0) {
+      startOpt = StartupOption.REGULAR;
+    }
+    for (int i = 0; i < argsLen; i++) {
+      String cmd = args[i];
+      if (StartupOption.INIT.getName().equalsIgnoreCase(cmd)) {
+        startOpt = StartupOption.INIT;
+        if (argsLen > 3) {
+          return null;
+        }
+        for (i = i + 1; i < argsLen; i++) {
+          if (args[i].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
+            i++;
+            if (i < argsLen && !args[i].isEmpty()) {
+              startOpt.setClusterId(args[i]);
+            } else {
+              // if no cluster id is specified, or it is empty, return null
+              LOG.error(
+                  "Must specify a valid cluster ID after the "
+                      + StartupOption.CLUSTERID.getName()
+                      + " flag");
+              return null;
+            }
+          } else {
+            return null;
+          }
+        }
+      } else {
+        if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) {
+          if (argsLen > 1) {
+            return null;
+          }
+          startOpt = StartupOption.GENCLUSTERID;
+        }
+      }
+    }
+    return startOpt;
+  }
+
+  /**
+   * Initialize SCM metrics.
+   */
+  public static void initMetrics() {
+    metrics = SCMMetrics.create();
+  }
+
+  /**
+   * Return SCM metrics instance.
+   */
+  public static SCMMetrics getMetrics() {
+    return metrics == null ? SCMMetrics.create() : metrics;
+  }
+
+  public SCMStorage getScmStorage() {
+    return scmStorage;
+  }
+
+  public SCMDatanodeProtocolServer getDatanodeProtocolServer() {
+    return datanodeProtocolServer;
+  }
+
+  public SCMBlockProtocolServer getBlockProtocolServer() {
+    return blockProtocolServer;
+  }
+
+  public SCMClientProtocolServer getClientProtocolServer() {
+    return clientProtocolServer;
+  }
+
+  /**
+   * Initializes the cache of container reports sent from datanodes.
+   *
+   * @param conf configuration
+   */
+  private void initContainerReportCache(OzoneConfiguration conf) {
+    containerReportCache =
+        CacheBuilder.newBuilder()
+            .expireAfterAccess(Long.MAX_VALUE, TimeUnit.MILLISECONDS)
+            .maximumSize(Integer.MAX_VALUE)
+            .removalListener(
+                new RemovalListener<String, ContainerStat>() {
+                  @Override
+                  public void onRemoval(
+                      RemovalNotification<String, ContainerStat>
+                          removalNotification) {
+                    synchronized (containerReportCache) {
+                      ContainerStat stat = removalNotification.getValue();
+                      // remove invalid container report
+                      metrics.decrContainerStat(stat);
+                      LOG.debug(
+                          "Remove expired container stat entry for datanode: " +
+                              "{}.",
+                          removalNotification.getKey());
+                    }
+                  }
+                })
+            .build();
+  }
+
+  private void registerMXBean() {
+    Map<String, String> jmxProperties = new HashMap<>();
+    jmxProperties.put("component", "ServerRuntime");
+    this.scmInfoBeanName =
+        MBeans.register(
+            "StorageContainerManager", "StorageContainerManagerInfo",
+            jmxProperties, this);
+  }
+
+  private void unregisterMXBean() {
+    if (this.scmInfoBeanName != null) {
+      MBeans.unregister(this.scmInfoBeanName);
+      this.scmInfoBeanName = null;
+    }
+  }
+
+  @VisibleForTesting
+  public ContainerInfo getContainerInfo(String containerName) throws
+      IOException {
+    return scmContainerManager.getContainer(containerName);
+  }
+
+  /**
+   * Returns the listening address of the StorageContainerLocationProtocol
+   * RPC server.
+   *
+   * @return listening address of the client RPC server
+   */
+  @VisibleForTesting
+  public InetSocketAddress getClientRpcAddress() {
+    return getClientProtocolServer().getClientRpcAddress();
+  }
+
+  @Override
+  public String getClientRpcPort() {
+    InetSocketAddress addr = getClientRpcAddress();
+    return addr == null ? "0" : Integer.toString(addr.getPort());
+  }
+
+  /**
+   * Returns the listening address of the datanode protocol RPC server.
+   *
+   * @return address on which datanodes communicate with SCM
+   */
+  public InetSocketAddress getDatanodeRpcAddress() {
+    return getDatanodeProtocolServer().getDatanodeRpcAddress();
+  }
+
+  @Override
+  public String getDatanodeRpcPort() {
+    InetSocketAddress addr = getDatanodeRpcAddress();
+    return addr == null ? "0" : Integer.toString(addr.getPort());
+  }
+
+  /**
+   * Start service.
+   */
+  public void start() throws IOException {
+    LOG.info(
+        buildRpcServerStartMessage(
+            "StorageContainerLocationProtocol RPC server",
+            getClientRpcAddress()));
+    DefaultMetricsSystem.initialize("StorageContainerManager");
+    getClientProtocolServer().start();
+
+    LOG.info(buildRpcServerStartMessage("ScmBlockLocationProtocol RPC " +
+        "server", getBlockProtocolServer().getBlockRpcAddress()));
+    getBlockProtocolServer().start();
+
+    LOG.info(buildRpcServerStartMessage("ScmDatanodeProtocl RPC " +
+        "server", getDatanodeProtocolServer().getDatanodeRpcAddress()));
+    getDatanodeProtocolServer().start();
+
+    httpServer.start();
+    scmBlockManager.start();
+
+    setStartTime();
+  }
+
+  /**
+   * Stop service.
+   */
+  public void stop() {
+
+    try {
+      LOG.info("Stopping datanode service RPC server");
+      getDatanodeProtocolServer().stop();
+
+    } catch (Exception ex) {
+      LOG.error("Storage Container Manager datanode RPC stop failed.", ex);
+    }
+
+    try {
+      LOG.info("Stopping block service RPC server");
+      getBlockProtocolServer().stop();
+    } catch (Exception ex) {
+      LOG.error("Storage Container Manager blockRpcServer stop failed.", ex);
+    }
+
+    try {
+      LOG.info("Stopping the StorageContainerLocationProtocol RPC server");
+      getClientProtocolServer().stop();
+    } catch (Exception ex) {
+      LOG.error("Storage Container Manager clientRpcServer stop failed.", ex);
+    }
+
+    try {
+      LOG.info("Stopping Storage Container Manager HTTP server.");
+      httpServer.stop();
+    } catch (Exception ex) {
+      LOG.error("Storage Container Manager HTTP server stop failed.", ex);
+    }
+
+    try {
+      LOG.info("Stopping Block Manager Service.");
+      scmBlockManager.stop();
+    } catch (Exception ex) {
+      LOG.error("SCM block manager service stop failed.", ex);
+    }
+
+    if (containerReportCache != null) {
+      containerReportCache.invalidateAll();
+      containerReportCache.cleanUp();
+    }
+
+    if (metrics != null) {
+      metrics.unRegister();
+    }
+
+    unregisterMXBean();
+    IOUtils.cleanupWithLogger(LOG, scmContainerManager);
+  }
+
+  /**
+   * Wait until service has completed shutdown.
+   */
+  public void join() {
+    try {
+      getBlockProtocolServer().join();
+      getClientProtocolServer().join();
+      getDatanodeProtocolServer().join();
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      LOG.info("Interrupted during StorageContainerManager join.");
+    }
+  }
+
+  /**
+   * Returns the number of datanodes in the given state that are
+   * communicating with SCM.
+   *
+   * @param nodestate node state, e.g. HEALTHY or DEAD
+   * @return count of datanodes in that state
+   */
+  public int getNodeCount(NodeState nodestate) {
+    return scmNodeManager.getNodeCount(nodestate);
+  }
+
+  /**
+   * Returns SCM container manager.
+   */
+  @VisibleForTesting
+  public Mapping getScmContainerManager() {
+    return scmContainerManager;
+  }
+
+  /**
+   * Returns node manager.
+   *
+   * @return - Node Manager
+   */
+  @VisibleForTesting
+  public NodeManager getScmNodeManager() {
+    return scmNodeManager;
+  }
+
+  @VisibleForTesting
+  public BlockManager getScmBlockManager() {
+    return scmBlockManager;
+  }
+
+  @VisibleForTesting
+  public String getPpcRemoteUsername() {
+    UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser();
+    return user == null ? null : user.getUserName();
+  }
+
+  public void checkAdminAccess() throws IOException {
+    String remoteUser = getPpcRemoteUsername();
+    if (remoteUser != null) {
+      if (!scmAdminUsernames.contains(remoteUser)) {
+        throw new IOException(
+            "Access denied for user " + remoteUser + ". Superuser privilege " +
+                "is required.");
+      }
+    }
+  }
+
+  /**
+   * Invalidates the container stat entry for the given datanode.
+   *
+   * @param datanodeUuid UUID of the datanode whose entry is removed
+   */
+  public void removeContainerReport(String datanodeUuid) {
+    synchronized (containerReportCache) {
+      containerReportCache.invalidate(datanodeUuid);
+    }
+  }
+
+  /**
+   * Gets the container stat of the specified datanode.
+   *
+   * @param datanodeUuid UUID of the datanode
+   * @return cached ContainerStat, or null if none is present
+   */
+  public ContainerStat getContainerReport(String datanodeUuid) {
+    ContainerStat stat = null;
+    synchronized (containerReportCache) {
+      stat = containerReportCache.getIfPresent(datanodeUuid);
+    }
+
+    return stat;
+  }
+
+  /**
+   * Returns a view of the container stat entries. Modifications made to the
+   * map directly affect the cache.
+   *
+   * @return concurrent map view of the container stat cache
+   */
+  public ConcurrentMap<String, ContainerStat> getContainerReportCache() {
+    return containerReportCache.asMap();
+  }
+
+  @Override
+  public Map<String, String> getContainerReport() {
+    Map<String, String> id2StatMap = new HashMap<>();
+    synchronized (containerReportCache) {
+      ConcurrentMap<String, ContainerStat> map = containerReportCache.asMap();
+      for (Map.Entry<String, ContainerStat> entry : map.entrySet()) {
+        id2StatMap.put(entry.getKey(), entry.getValue().toJsonString());
+      }
+    }
+
+    return id2StatMap;
+  }
+
+  /**
+   * Startup options.
+   */
+  public enum StartupOption {
+    INIT("-init"),
+    CLUSTERID("-clusterid"),
+    GENCLUSTERID("-genclusterid"),
+    REGULAR("-regular"),
+    HELP("-help");
+
+    private final String name;
+    private String clusterId = null;
+
+    StartupOption(String arg) {
+      this.name = arg;
+    }
+
+    public String getClusterId() {
+      return clusterId;
+    }
+
+    public void setClusterId(String cid) {
+      if (cid != null && !cid.isEmpty()) {
+        clusterId = cid;
+      }
+    }
+
+    public String getName() {
+      return name;
+    }
+  }
+}
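
For reference, a minimal sketch of how the new entry points compose when driven programmatically; it assumes ozone is enabled (the OZONE_ENABLED key) and the SCM metadata directories are configured, and it mirrors main() above. The equivalent CLI forms from USAGE are "ozone scm -init [-clusterid <cid>]", "ozone scm -genclusterid" and plain "ozone scm".

import java.io.IOException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;

public class ScmLifecycleSketch {
  public static void main(String[] args) throws IOException {
    OzoneConfiguration conf = new OzoneConfiguration();
    // One-time format of the SCM storage directory (same as `ozone scm -init`);
    // reuses the existing cluster id and returns true if already initialized.
    if (!StorageContainerManager.scmInit(conf)) {
      throw new IOException("SCM storage initialization failed");
    }
    // Regular startup path (same as `ozone scm` with no arguments).
    StorageContainerManager scm =
        StorageContainerManager.createSCM(new String[0], conf);
    scm.start();  // client, block and datanode RPC servers + HTTP server
    scm.join();   // block until the RPC servers are shut down
  }
}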

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
new file mode 100644
index 0000000..75b2036
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.server.BaseHttpServer;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+
+import java.io.IOException;
+
+/**
+ * HttpServer2 wrapper for the Ozone Storage Container Manager.
+ */
+public class StorageContainerManagerHttpServer extends BaseHttpServer {
+
+  public StorageContainerManagerHttpServer(Configuration conf)
+      throws IOException {
+    super(conf, "scm");
+  }
+
+  @Override protected String getHttpAddressKey() {
+    return ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY;
+  }
+
+  @Override protected String getHttpBindHostKey() {
+    return ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_KEY;
+  }
+
+  @Override protected String getHttpsAddressKey() {
+    return ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY;
+  }
+
+  @Override protected String getHttpsBindHostKey() {
+    return ScmConfigKeys.OZONE_SCM_HTTPS_BIND_HOST_KEY;
+  }
+
+  @Override protected String getBindHostDefault() {
+    return ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_DEFAULT;
+  }
+
+  @Override protected int getHttpBindPortDefault() {
+    return ScmConfigKeys.OZONE_SCM_HTTP_BIND_PORT_DEFAULT;
+  }
+
+  @Override protected int getHttpsBindPortDefault() {
+    return ScmConfigKeys.OZONE_SCM_HTTPS_BIND_PORT_DEFAULT;
+  }
+
+  @Override protected String getKeytabFile() {
+    return ScmConfigKeys.OZONE_SCM_KEYTAB_FILE;
+  }
+
+  @Override protected String getSpnegoPrincipal() {
+    return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
+  }
+
+  @Override protected String getEnabledKey() {
+    return ScmConfigKeys.OZONE_SCM_HTTP_ENABLED_KEY;
+  }
+
+}
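
A short sketch of how this wrapper is used; it mirrors the constructor and the start()/stop() calls made from StorageContainerManager above, and assumes the SCM web application resources and HTTP binding keys are configured.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.server.StorageContainerManagerHttpServer;

final class ScmHttpServerSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Bind host and port are resolved from the OZONE_SCM_HTTP_* keys returned
    // by the overrides above.
    StorageContainerManagerHttpServer httpServer =
        new StorageContainerManagerHttpServer(conf);
    httpServer.start();
    // ... serve the SCM web UI and JMX until shutdown ...
    httpServer.stop();
  }
}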

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java
new file mode 100644
index 0000000..fe07272
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
+ * information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache
+ * License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a
+ * copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.server;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
index 5ca9dd7..0dbb7c1 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdds.scm;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManagerHttpServer;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.http.HttpConfig;
@@ -36,7 +37,9 @@ import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
 import java.io.File;
+import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.Arrays;
@@ -95,7 +98,7 @@ public class TestStorageContainerManagerHttpServer {
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
     conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0");
 
-    InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
+    InetSocketAddress.createUnresolved("localhost", 0);
     StorageContainerManagerHttpServer server = null;
     try {
       server = new StorageContainerManagerHttpServer(conf);
@@ -128,7 +131,7 @@ public class TestStorageContainerManagerHttpServer {
       URLConnection conn = connectionFactory.openConnection(url);
       conn.connect();
       conn.getContent();
-    } catch (Exception e) {
+    } catch (IOException e) {
       return false;
     }
     return true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-ozone/common/src/main/bin/ozone
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone
index 7419743..00261c7 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -108,7 +108,7 @@ function ozonecmd_case
     ;;
     scm)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.hdds.scm.StorageContainerManager'
+      HADOOP_CLASSNAME='org.apache.hadoop.hdds.scm.server.StorageContainerManager'
       hadoop_debug "Appending HDFS_STORAGECONTAINERMANAGER_OPTS onto HADOOP_OPTS"
       HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_STORAGECONTAINERMANAGER_OPTS}"
     ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
index 87d203e..f745788 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
@@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.StorageContainerManager;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.junit.After;
@@ -70,7 +70,8 @@ public class TestContainerStateManager {
   public void testAllocateContainer() throws IOException {
     // Allocate a container and verify the container info
     String container1 = "container" + RandomStringUtils.randomNumeric(5);
-    scm.allocateContainer(xceiverClientManager.getType(),
+    scm.getClientProtocolServer().allocateContainer(
+        xceiverClientManager.getType(),
         xceiverClientManager.getFactor(), container1, containerOwner);
     ContainerInfo info = containerStateManager
         .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
@@ -87,7 +88,8 @@ public class TestContainerStateManager {
 
     // Check there are two containers in ALLOCATED state after allocation
     String container2 = "container" + RandomStringUtils.randomNumeric(5);
-    scm.allocateContainer(xceiverClientManager.getType(),
+    scm.getClientProtocolServer().allocateContainer(
+        xceiverClientManager.getType(),
         xceiverClientManager.getFactor(), container2, containerOwner);
     int numContainers = containerStateManager
         .getMatchingContainerIDs(containerOwner,
@@ -101,7 +103,8 @@ public class TestContainerStateManager {
     // Allocate 5 containers in ALLOCATED state and 5 in CREATING state
     String cname = "container" + RandomStringUtils.randomNumeric(5);
     for (int i = 0; i < 10; i++) {
-      scm.allocateContainer(xceiverClientManager.getType(),
+      scm.getClientProtocolServer().allocateContainer(
+          xceiverClientManager.getType(),
           xceiverClientManager.getFactor(), cname + i, containerOwner);
       if (i >= 5) {
         scm.getScmContainerManager()
@@ -128,7 +131,8 @@ public class TestContainerStateManager {
   @Test
   public void testGetMatchingContainer() throws IOException {
     String container1 = "container-01234";
-    scm.allocateContainer(xceiverClientManager.getType(),
+    scm.getClientProtocolServer().allocateContainer(
+        xceiverClientManager.getType(),
         xceiverClientManager.getFactor(), container1, containerOwner);
     scmContainerMapping.updateContainerState(container1,
         HddsProtos.LifeCycleEvent.CREATE);
@@ -136,7 +140,8 @@ public class TestContainerStateManager {
         HddsProtos.LifeCycleEvent.CREATED);
 
     String container2 = "container-56789";
-    scm.allocateContainer(xceiverClientManager.getType(),
+    scm.getClientProtocolServer().allocateContainer(
+        xceiverClientManager.getType(),
         xceiverClientManager.getFactor(), container2, containerOwner);
 
     ContainerInfo info = containerStateManager
@@ -177,7 +182,8 @@ public class TestContainerStateManager {
     // Allocate container1 and update its state from ALLOCATED -> CREATING ->
     // OPEN -> CLOSING -> CLOSED -> DELETING -> DELETED
     String container1 = "container" + RandomStringUtils.randomNumeric(5);
-    scm.allocateContainer(xceiverClientManager.getType(),
+    scm.getClientProtocolServer().allocateContainer(
+        xceiverClientManager.getType(),
         xceiverClientManager.getFactor(), container1, containerOwner);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -229,7 +235,8 @@ public class TestContainerStateManager {
     // Allocate container1 and update its state from ALLOCATED -> CREATING ->
     // DELETING
     String container2 = "container" + RandomStringUtils.randomNumeric(5);
-    scm.allocateContainer(xceiverClientManager.getType(),
+    scm.getClientProtocolServer().allocateContainer(
+        xceiverClientManager.getType(),
         xceiverClientManager.getFactor(), container2, containerOwner);
     scmContainerMapping.updateContainerState(container2,
         HddsProtos.LifeCycleEvent.CREATE);
@@ -243,7 +250,8 @@ public class TestContainerStateManager {
     // Allocate container1 and update its state from ALLOCATED -> CREATING ->
     // OPEN -> CLOSING -> CLOSED
     String container3 = "container" + RandomStringUtils.randomNumeric(5);
-    scm.allocateContainer(xceiverClientManager.getType(),
+    scm.getClientProtocolServer().allocateContainer(
+        xceiverClientManager.getType(),
         xceiverClientManager.getFactor(), container3, containerOwner);
     scmContainerMapping.updateContainerState(container3,
         HddsProtos.LifeCycleEvent.CREATE);
@@ -262,7 +270,8 @@ public class TestContainerStateManager {
   @Test
   public void testUpdatingAllocatedBytes() throws Exception {
     String container1 = "container" + RandomStringUtils.randomNumeric(5);
-    scm.allocateContainer(xceiverClientManager.getType(),
+    scm.getClientProtocolServer().allocateContainer(
+        xceiverClientManager.getType(),
         xceiverClientManager.getFactor(), container1, containerOwner);
     scmContainerMapping.updateContainerState(container1,
         HddsProtos.LifeCycleEvent.CREATE);
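
The hunks above all follow the same pattern: container calls that the tests used to make directly on the SCM object now go through the dedicated client protocol server. A hypothetical helper mirroring the updated calls (the Pipeline import path is assumed from the neighbouring tests):

import java.io.IOException;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;

final class ScmClientCallSketch {
  private ScmClientCallSketch() {
  }

  static Pipeline allocate(StorageContainerManager scm,
      XceiverClientManager clientManager, String containerName, String owner)
      throws IOException {
    // Previously: scm.allocateContainer(type, factor, containerName, owner).
    // The RPC endpoint now lives on SCMClientProtocolServer.
    return scm.getClientProtocolServer().allocateContainer(
        clientManager.getType(), clientManager.getFactor(),
        containerName, owner);
  }
}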

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 8c49f65..091d771 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
+ * <p>
  * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -19,7 +19,7 @@ package org.apache.hadoop.ozone;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.StorageContainerManager;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.ksm.KeySpaceManager;
 import org.apache.hadoop.hdds.scm.protocolPB
@@ -38,6 +38,17 @@ import java.util.concurrent.TimeoutException;
 public interface MiniOzoneCluster {
 
   /**
+   * Returns the Builder to construct MiniOzoneCluster.
+   *
+   * @param conf OzoneConfiguration
+   *
+   * @return MiniOzoneCluster builder
+   */
+  static Builder newBuilder(OzoneConfiguration conf) {
+    return new MiniOzoneClusterImpl.Builder(conf);
+  }
+
+  /**
    * Returns the configuration object associated with the MiniOzoneCluster.
    *
    * @return Configuration
@@ -119,8 +130,8 @@ public interface MiniOzoneCluster {
    * @return StorageContainerLocation Client
    * @throws IOException
    */
-  StorageContainerLocationProtocolClientSideTranslatorPB getStorageContainerLocationClient()
-      throws IOException;
+  StorageContainerLocationProtocolClientSideTranslatorPB
+      getStorageContainerLocationClient() throws IOException;
 
   /**
    * Restarts StorageContainerManager instance.
@@ -156,19 +167,9 @@ public interface MiniOzoneCluster {
   void shutdown();
 
   /**
-   * Returns the Builder to construct MiniOzoneCluster.
-   *
-   * @param conf OzoneConfiguration
-   *
-   * @return MiniOzoneCluster builder
-   */
-  static Builder newBuilder(OzoneConfiguration conf) {
-    return new MiniOzoneClusterImpl.Builder(conf);
-  }
-
-  /**
    * Builder class for MiniOzoneCluster.
    */
+  @SuppressWarnings("CheckStyle")
   abstract class Builder {
 
     protected static final int DEFAULT_HB_INTERVAL_MS = 1000;
@@ -261,7 +262,6 @@ public interface MiniOzoneCluster {
       return this;
     }
 
-
     /**
      * Sets the number of HeartBeat Interval of Datanodes, the value should be
      * in MilliSeconds.
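
With newBuilder(OzoneConfiguration) now declared up front, a typical test drives the cluster roughly as follows (a hypothetical sketch; build() comes from MiniOzoneClusterImpl.Builder and is not shown in these hunks):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.ozone.MiniOzoneCluster;

final class MiniOzoneClusterSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).build();
    try {
      StorageContainerManager scm = cluster.getStorageContainerManager();
      System.out.println("SCM client RPC port: " + scm.getClientRpcPort());
    } finally {
      cluster.shutdown();
    }
  }
}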

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 891f67b..17872f4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -35,14 +35,14 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
 import org.apache.hadoop.ozone.ksm.KeySpaceManager;
-import org.apache.hadoop.hdds.scm.SCMStorage;
+import org.apache.hadoop.hdds.scm.server.SCMStorage;
 import org.apache.hadoop.ozone.ksm.KSMStorage;
 import org.apache.hadoop.ozone.web.client.OzoneRestClient;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.hdds.scm.StorageContainerManager;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 
@@ -179,8 +179,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
    * @throws IOException if there is an I/O error
    */
   @Override
-  public StorageContainerLocationProtocolClientSideTranslatorPB getStorageContainerLocationClient()
-      throws IOException {
+  public StorageContainerLocationProtocolClientSideTranslatorPB
+      getStorageContainerLocationClient() throws IOException {
     long version = RPC.getProtocolVersion(
         StorageContainerLocationProtocolPB.class);
     InetSocketAddress address = scm.getClientRpcAddress();
@@ -226,7 +226,7 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
 
       File baseDir = new File(GenericTestUtils.getTempPath(
           MiniOzoneClusterImpl.class.getSimpleName() + "-" +
-              scm.getScmInfo().getClusterId()));
+              scm.getClientProtocolServer().getScmInfo().getClusterId()));
       FileUtils.deleteDirectory(baseDir);
 
       if (ksm != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index fa307c9..5a5a08b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -22,7 +22,7 @@ import java.io.IOException;
 
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.SCMStorage;
+import org.apache.hadoop.hdds.scm.server.SCMStorage;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
@@ -32,8 +32,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType;
-import org.apache.hadoop.hdds.scm.StorageContainerManager;
-import org.apache.hadoop.hdds.scm.StorageContainerManager.StartupOption;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager.StartupOption;
 import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;
 import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@@ -113,7 +113,7 @@ public class TestStorageContainerManager {
           .thenReturn(fakeUser);
 
       try {
-        mockScm.deleteContainer("container1");
+        mockScm.getClientProtocolServer().deleteContainer("container1");
         fail("Operation should fail, expecting an IOException here.");
       } catch (Exception e) {
         if (expectPermissionDenied) {
@@ -127,8 +127,8 @@ public class TestStorageContainerManager {
       }
 
       try {
-        Pipeline pipeLine2 = mockScm.allocateContainer(
-            xceiverClientManager.getType(),
+        Pipeline pipeLine2 = mockScm.getClientProtocolServer()
+            .allocateContainer(xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, "container2", "OZONE");
         if (expectPermissionDenied) {
           fail("Operation should fail, expecting an IOException here.");
@@ -140,8 +140,8 @@ public class TestStorageContainerManager {
       }
 
       try {
-        Pipeline pipeLine3 = mockScm.allocateContainer(
-            xceiverClientManager.getType(),
+        Pipeline pipeLine3 = mockScm.getClientProtocolServer()
+            .allocateContainer(xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, "container3", "OZONE");
 
         if (expectPermissionDenied) {
@@ -155,7 +155,7 @@ public class TestStorageContainerManager {
       }
 
       try {
-        mockScm.getContainer("container4");
+        mockScm.getClientProtocolServer().getContainer("container4");
         fail("Operation should fail, expecting an IOException here.");
       } catch (Exception e) {
         if (expectPermissionDenied) {
@@ -436,7 +436,7 @@ public class TestStorageContainerManager {
     scmStore.initialize();
     StorageContainerManager scm = StorageContainerManager.createSCM(null, conf);
     //Reads the SCM Info from SCM instance
-    ScmInfo scmInfo = scm.getScmInfo();
+    ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
     Assert.assertEquals(clusterId, scmInfo.getClusterId());
     Assert.assertEquals(scmId, scmInfo.getScmId());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index 7005ea0..9917018 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -157,7 +157,7 @@ public class TestStorageContainerManagerHelper {
   private MetadataStore getContainerMetadata(String containerName)
       throws IOException {
     Pipeline pipeline = cluster.getStorageContainerManager()
-        .getContainer(containerName);
+        .getClientProtocolServer().getContainer(containerName);
     DatanodeDetails leadDN = pipeline.getLeader();
     OzoneContainer containerServer =
         getContainerServerByDatanodeUuid(leadDN.getUuidString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
index ad5783e..a2a04e0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.hdds.scm.StorageContainerManager;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.junit.AfterClass;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java
index a37f005..ae0ffa0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.hdds.scm.SCMStorage;
+import org.apache.hadoop.hdds.scm.server.SCMStorage;
 import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
 import org.apache.hadoop.ozone.protocol.proto
     .KeySpaceManagerProtocolProtos.ServicePort;
@@ -646,7 +646,8 @@ public class TestKeySpaceManager {
     keys.add(keyArgs.getResourceName());
     exception.expect(IOException.class);
     exception.expectMessage("Specified block key does not exist");
-    cluster.getStorageContainerManager().getBlockLocations(keys);
+    cluster.getStorageContainerManager().getBlockProtocolServer()
+        .getBlockLocations(keys);
 
     // Delete the key again to test deleting non-existing key.
     exception.expect(IOException.class);
@@ -818,9 +819,6 @@ public class TestKeySpaceManager {
     listKeyArgs = new ListArgs(bucketArgs, null, 100, null);
     result = storageHandler.listKeys(listKeyArgs);
     Assert.assertEquals(numKeys, result.getKeyList().size());
-    List<KeyInfo> allKeys = result.getKeyList().stream()
-        .filter(item -> item.getSize() == 4096)
-        .collect(Collectors.toList());
 
     // List keys with prefix "aKey".
     listKeyArgs = new ListArgs(bucketArgs, "aKey", 100, null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
index fffdbff..04473d1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.scm;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.StorageContainerManager;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -124,7 +124,7 @@ public class TestSCMCli {
   public void testCreateContainer() throws Exception {
     String containerName =  "containerTestCreate";
     try {
-      scm.getContainer(containerName);
+      scm.getClientProtocolServer().getContainer(containerName);
       fail("should not be able to get the container");
     } catch (IOException ioe) {
       assertTrue(ioe.getMessage().contains(
@@ -132,14 +132,16 @@ public class TestSCMCli {
     }
     String[] args = {"-container", "-create", "-c", containerName};
     assertEquals(ResultCode.SUCCESS, cli.run(args));
-    Pipeline container = scm.getContainer(containerName);
+    Pipeline container = scm.getClientProtocolServer()
+        .getContainer(containerName);
     assertNotNull(container);
     assertEquals(containerName, container.getContainerName());
   }
 
   private boolean containerExist(String containerName) {
     try {
-      Pipeline scmPipeline = scm.getContainer(containerName);
+      Pipeline scmPipeline = scm.getClientProtocolServer()
+          .getContainer(containerName);
       return scmPipeline != null
           && containerName.equals(scmPipeline.getContainerName());
     } catch (IOException e) {
@@ -447,7 +449,8 @@ public class TestSCMCli {
     String containerName =  "containerTestClose";
     String[] args = {"-container", "-create", "-c", containerName};
     assertEquals(ResultCode.SUCCESS, cli.run(args));
-    Pipeline container = scm.getContainer(containerName);
+    Pipeline container = scm.getClientProtocolServer()
+        .getContainer(containerName);
     assertNotNull(container);
     assertEquals(containerName, container.getContainerName());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
index 27a9404..372fd3d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.scm;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdds.scm.StorageContainerManager;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
index c28f68f..332e679 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
@@ -26,7 +26,7 @@ import java.util.UUID;
 
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.StorageContainerManager;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -80,7 +80,7 @@ public class TestSCMMetrics {
       ContainerReportsRequestProto request = createContainerReport(numReport,
           stat, null);
       String fstDatanodeUuid = request.getDatanodeDetails().getUuid();
-      scmManager.sendContainerReport(request);
+      scmManager.getDatanodeProtocolServer().sendContainerReport(request);
 
       // verify container stat metrics
       MetricsRecordBuilder scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
@@ -103,7 +103,7 @@ public class TestSCMMetrics {
       // add one new report
       request = createContainerReport(1, stat, null);
       String sndDatanodeUuid = request.getDatanodeDetails().getUuid();
-      scmManager.sendContainerReport(request);
+      scmManager.getDatanodeProtocolServer().sendContainerReport(request);
 
       scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
       assertEquals(size * (numReport + 1),
@@ -125,12 +125,12 @@ public class TestSCMMetrics {
       // Re-send reports but with different value for validating
       // the aggregation.
       stat = new ContainerStat(100, 50, 3, 50, 60, 5, 6);
-      scmManager.sendContainerReport(createContainerReport(1, stat,
-          fstDatanodeUuid));
+      scmManager.getDatanodeProtocolServer().sendContainerReport(
+          createContainerReport(1, stat, fstDatanodeUuid));
 
       stat = new ContainerStat(1, 1, 1, 1, 1, 1, 1);
-      scmManager.sendContainerReport(createContainerReport(1, stat,
-          sndDatanodeUuid));
+      scmManager.getDatanodeProtocolServer().sendContainerReport(
+          createContainerReport(1, stat, sndDatanodeUuid));
 
       // the global container metrics value should be updated
       scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
@@ -175,7 +175,7 @@ public class TestSCMMetrics {
           .getDatanodeDetails().getUuidString();
       ContainerReportsRequestProto request = createContainerReport(numReport,
           stat, datanodeUuid);
-      scmManager.sendContainerReport(request);
+      scmManager.getDatanodeProtocolServer().sendContainerReport(request);
 
       MetricsRecordBuilder scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
       assertEquals(size * numReport,


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org