Posted to common-commits@hadoop.apache.org by bh...@apache.org on 2018/07/09 20:19:37 UTC

[01/50] [abbrv] hadoop git commit: YARN-7556. Fair scheduler configuration should allow resource types in the minResources and maxResources properties. (Daniel Templeton and Szilard Nemeth via Haibo Chen)

Repository: hadoop
Updated Branches:
  refs/heads/HDDS-48 44e19fc7f -> d1baaff8c


YARN-7556. Fair scheduler configuration should allow resource types in the minResources and maxResources properties. (Daniel Templeton and Szilard Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17262470
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17262470
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17262470

Branch: refs/heads/HDDS-48
Commit: 17262470246232d0f0651d627a4961e55b1efe6a
Parents: 71df8c2
Author: Haibo Chen <ha...@apache.org>
Authored: Thu Jul 5 10:42:39 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Thu Jul 5 10:42:39 2018 -0700

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            |  17 +-
 .../hadoop/yarn/api/records/Resource.java       |  13 ++
 .../api/records/impl/LightWeightResource.java   |  23 ++-
 .../scheduler/fair/ConfigurableResource.java    |  69 +++++++-
 .../fair/FairSchedulerConfiguration.java        | 174 ++++++++++++++++---
 .../allocation/AllocationFileQueueParser.java   |   2 +-
 .../fair/TestFairSchedulerConfiguration.java    | 151 ++++++++++++----
 .../src/site/markdown/FairScheduler.md          |   6 +-
 8 files changed, 385 insertions(+), 70 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 5841361..5cc81e5 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -67,11 +67,6 @@
   </Match>
   <Match>
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
-    <Method name="getLocalityStatistics" />
-    <Bug pattern="EI_EXPOSE_REP" />
-  </Match>
-  <Match>
-    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
     <Method name="incNumAllocatedContainers"/>
     <Bug pattern="VO_VOLATILE_INCREMENT" />
   </Match>
@@ -118,6 +113,18 @@
     <Bug pattern="BC_UNCONFIRMED_CAST" />
   </Match>
 
+  <!-- Ignore exposed internal representations -->
+  <Match>
+    <Class name="org.apache.hadoop.yarn.api.records.Resource" />
+    <Method name="getResources" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
+    <Method name="getLocalityStatistics" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+
   <!-- Object cast is based on the event type -->
   <Match>
     <Class name="org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher" />

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 71a6b54..173d4c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.impl.LightWeightResource;
@@ -75,6 +76,18 @@ public abstract class Resource implements Comparable<Resource> {
   @Private
   public static final int VCORES_INDEX = 1;
 
+  /**
+   * Return a new {@link Resource} instance with all resource values
+   * initialized to {@code value}.
+   * @param value the value to use for all resources
+   * @return a new {@link Resource} instance
+   */
+  @Private
+  @Unstable
+  public static Resource newInstance(long value) {
+    return new LightWeightResource(value);
+  }
+
   @Public
   @Stable
   public static Resource newInstance(int memory, int vCores) {
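
For context, a minimal sketch of how the new single-argument factory might be used. The class name and values below are hypothetical and not part of this patch; the method is @Private/@Unstable, so it is intended for internal callers such as the fair scheduler rather than applications, and this assumes only the standard memory-mb and vcores types are registered.

    import org.apache.hadoop.yarn.api.records.Resource;

    // Hypothetical sketch; not part of this patch.
    public class AllValuesResourceSketch {
      public static void main(String[] args) {
        // Every known resource type (memory-mb, vcores, plus any custom types
        // registered via yarn.resource-types) starts at the same value, so
        // resources that a config string never mentions behave as "unlimited".
        Resource allMax = Resource.newInstance(Long.MAX_VALUE);

        // Callers then overwrite only the resources that were actually configured.
        allMax.setResourceValue("memory-mb", 5120L);
        allMax.setResourceValue("vcores", 2L);

        System.out.println(allMax);
      }
    }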

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
index a6e6432..77f77f3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
@@ -18,9 +18,8 @@
 
 package org.apache.hadoop.yarn.api.records.impl;
 
-import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -58,13 +57,29 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.*;
  *
  * @see Resource
  */
-@InterfaceAudience.Private
+@Private
 @Unstable
 public class LightWeightResource extends Resource {
 
   private ResourceInformation memoryResInfo;
   private ResourceInformation vcoresResInfo;
 
+  /**
+   * Create a new {@link LightWeightResource} instance with all resource values
+   * initialized to {@code value}.
+   * @param value the value to use for all resources
+   */
+  public LightWeightResource(long value) {
+    ResourceInformation[] types = ResourceUtils.getResourceTypesArray();
+    initResourceInformations(value, value, types.length);
+
+    for (int i = 2; i < types.length; i++) {
+      resources[i] = new ResourceInformation();
+      ResourceInformation.copy(types[i], resources[i]);
+      resources[i].setValue(value);
+    }
+  }
+
   public LightWeightResource(long memory, int vcores) {
     int numberOfKnownResourceTypes = ResourceUtils
         .getNumberOfKnownResourceTypes();
@@ -91,7 +106,7 @@ public class LightWeightResource extends Resource {
     }
   }
 
-  private void initResourceInformations(long memory, int vcores,
+  private void initResourceInformations(long memory, long vcores,
       int numberOfKnownResourceTypes) {
     this.memoryResInfo = newDefaultInformation(MEMORY_URI, MEMORY_MB.getUnits(),
         memory);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
index ecdd011..0c3b0dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
@@ -18,9 +18,13 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
+import java.util.Arrays;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 
 /**
  * A {@code ConfigurableResource} object represents an entity that is used to
@@ -33,29 +37,53 @@ public class ConfigurableResource {
   private final Resource resource;
   private final double[] percentages;
 
-  public ConfigurableResource(double[] percentages) {
+  ConfigurableResource() {
+    this(getOneHundredPercentArray());
+  }
+
+  ConfigurableResource(double[] percentages) {
     this.percentages = percentages.clone();
     this.resource = null;
   }
 
+  ConfigurableResource(long value) {
+    this(Resource.newInstance(value));
+  }
+
   public ConfigurableResource(Resource resource) {
     this.percentages = null;
     this.resource = resource;
   }
 
+  private static double[] getOneHundredPercentArray() {
+    double[] resourcePercentages =
+        new double[ResourceUtils.getNumberOfKnownResourceTypes()];
+    Arrays.fill(resourcePercentages, 1.0);
+
+    return resourcePercentages;
+  }
+
   /**
    * Get resource by multiplying the cluster resource and the percentage of
    * each resource respectively. Return the absolute resource if either
    * {@code percentages} or {@code clusterResource} is null.
    *
    * @param clusterResource the cluster resource
-   * @return resource
+   * @return the resulting resource
    */
   public Resource getResource(Resource clusterResource) {
     if (percentages != null && clusterResource != null) {
       long memory = (long) (clusterResource.getMemorySize() * percentages[0]);
       int vcore = (int) (clusterResource.getVirtualCores() * percentages[1]);
-      return Resource.newInstance(memory, vcore);
+      Resource res = Resource.newInstance(memory, vcore);
+      ResourceInformation[] clusterInfo = clusterResource.getResources();
+
+      for (int i = 2; i < clusterInfo.length; i++) {
+        res.setResourceValue(i,
+            (long)(clusterInfo[i].getValue() * percentages[i]));
+      }
+
+      return res;
     } else {
       return resource;
     }
@@ -69,4 +97,39 @@ public class ConfigurableResource {
   public Resource getResource() {
     return resource;
   }
+
+  /**
+   * Set the value of the wrapped resource if this object isn't setup to use
+   * percentages. If this object is set to use percentages, this method has
+   * no effect.
+   *
+   * @param name the name of the resource
+   * @param value the value
+   */
+  void setValue(String name, long value) {
+    if (resource != null) {
+      resource.setResourceValue(name, value);
+    }
+  }
+
+  /**
+   * Set the percentage of the resource if this object is setup to use
+   * percentages. If this object is not set to use percentages, this method has
+   * no effect.
+   *
+   * @param name the name of the resource
+   * @param value the percentage
+   */
+  void setPercentage(String name, double value) {
+    if (percentages != null) {
+      Integer index = ResourceUtils.getResourceTypeIndex().get(name);
+
+      if (index != null) {
+        percentages[index] = value;
+      } else {
+        throw new ResourceNotFoundException("The requested resource, \""
+            + name + "\", could not be found.");
+      }
+    }
+  }
 }
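
For context, a rough sketch of how the percentage path above resolves against a concrete cluster resource. The class below is hypothetical; the no-argument constructor and setPercentage() are package-private, so such code would have to live in the scheduler.fair package, and only memory-mb and vcores are assumed to be registered.

    package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;

    import org.apache.hadoop.yarn.api.records.Resource;

    // Hypothetical sketch; not part of this patch.
    class ConfigurableResourceSketch {
      static Resource resolveMaxShare() {
        // A no-arg ConfigurableResource starts at 100% for every known type;
        // setPercentage() then narrows individual types (values are fractions,
        // matching what findPercentage() now returns).
        ConfigurableResource maxShare = new ConfigurableResource();
        maxShare.setPercentage("memory-mb", 0.4);
        maxShare.setPercentage("vcores", 0.75);

        Resource cluster = Resource.newInstance(10 * 1024, 4);
        // getResource(cluster) scales each cluster value by its percentage:
        // 10240 MB * 0.4 = 4096 MB, 4 vcores * 0.75 = 3 vcores.
        return maxShare.getResource(cluster);
      }
    }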

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index b50e4bb..8c4932b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -213,6 +214,9 @@ public class FairSchedulerConfiguration extends Configuration {
           CONF_PREFIX + "reservable-nodes";
   public static final float RESERVABLE_NODES_DEFAULT = 0.05f;
 
+  private static final String INVALID_RESOURCE_DEFINITION_PREFIX =
+          "Error reading resource config--invalid resource definition: ";
+
   public FairSchedulerConfiguration() {
     super();
   }
@@ -407,54 +411,167 @@ public class FairSchedulerConfiguration extends Configuration {
   }
 
   /**
-   * Parses a resource config value of a form like "1024", "1024 mb",
-   * or "1024 mb, 3 vcores". If no units are given, megabytes are assumed.
-   * 
-   * @throws AllocationConfigurationException
+   * Parses a resource config value in one of three forms:
+   * <ol>
+   * <li>Percentage: &quot;50%&quot; or &quot;40% memory, 60% cpu&quot;</li>
+   * <li>New style resources: &quot;vcores=10, memory-mb=1024&quot;
+   * or &quot;vcores=60%, memory-mb=40%&quot;</li>
+   * <li>Old style resources: &quot;1024 mb, 10 vcores&quot;</li>
+   * </ol>
+   * In new style resources, any resource that is not specified will be
+   * set to {@link Long#MAX_VALUE} or 100%, as appropriate. Also, in the new
+   * style resources, units are not allowed. Units are assumed from the resource
+   * manager's settings for the resources when the value isn't a percentage.
+   *
+   * @param value the resource definition to parse
+   * @return a {@link ConfigurableResource} that represents the parsed value
+   * @throws AllocationConfigurationException if the raw value is not a valid
+   * resource definition
    */
-  public static ConfigurableResource parseResourceConfigValue(String val)
+  public static ConfigurableResource parseResourceConfigValue(String value)
       throws AllocationConfigurationException {
+    return parseResourceConfigValue(value, Long.MAX_VALUE);
+  }
+
+  /**
+   * Parses a resource config value in one of three forms:
+   * <ol>
+   * <li>Percentage: &quot;50%&quot; or &quot;40% memory, 60% cpu&quot;</li>
+   * <li>New style resources: &quot;vcores=10, memory-mb=1024&quot;
+   * or &quot;vcores=60%, memory-mb=40%&quot;</li>
+   * <li>Old style resources: &quot;1024 mb, 10 vcores&quot;</li>
+   * </ol>
+   * In new style resources, any resource that is not specified will be
+   * set to {@code missing} or 100%, as appropriate. Also, in the new style
+   * resources, units are not allowed. Units are assumed from the resource
+   * manager's settings for the resources when the value isn't a percentage.
+   *
+   * The {@code missing} parameter is only used in the case of new style
+   * resources without percentages. With new style resources with percentages,
+   * any missing resources will be assumed to be 100% because percentages are
+   * only used with maximum resource limits.
+   *
+   * @param value the resource definition to parse
+   * @param missing the value to use for any unspecified resources
+   * @return a {@link ConfigurableResource} that represents the parsed value
+   * @throws AllocationConfigurationException if the raw value is not a valid
+   * resource definition
+   */
+  public static ConfigurableResource parseResourceConfigValue(String value,
+      long missing) throws AllocationConfigurationException {
     ConfigurableResource configurableResource;
+
+    if (value.trim().isEmpty()) {
+      throw new AllocationConfigurationException("Error reading resource "
+          + "config--the resource string is empty.");
+    }
+
     try {
-      val = StringUtils.toLowerCase(val);
-      if (val.contains("%")) {
-        configurableResource = new ConfigurableResource(
-            getResourcePercentage(val));
+      if (value.contains("=")) {
+        configurableResource = parseNewStyleResource(value, missing);
+      } else if (value.contains("%")) {
+        configurableResource = parseOldStyleResourceAsPercentage(value);
       } else {
-        int memory = findResource(val, "mb");
-        int vcores = findResource(val, "vcores");
-        configurableResource = new ConfigurableResource(
-            BuilderUtils.newResource(memory, vcores));
+        configurableResource = parseOldStyleResource(value);
       }
-    } catch (AllocationConfigurationException ex) {
-      throw ex;
-    } catch (Exception ex) {
+    } catch (RuntimeException ex) {
       throw new AllocationConfigurationException(
           "Error reading resource config", ex);
     }
+
+    return configurableResource;
+  }
+
+  private static ConfigurableResource parseNewStyleResource(String value,
+          long missing) throws AllocationConfigurationException {
+
+    final ConfigurableResource configurableResource;
+    boolean asPercent = value.contains("%");
+    if (asPercent) {
+      configurableResource = new ConfigurableResource();
+    } else {
+      configurableResource = new ConfigurableResource(missing);
+    }
+
+    String[] resources = value.split(",");
+    for (String resource : resources) {
+      String[] parts = resource.split("=");
+
+      if (parts.length != 2) {
+        throw createConfigException(value,
+                        "Every resource must be of the form: name=value.");
+      }
+
+      String resourceName = parts[0].trim();
+      String resourceValue = parts[1].trim();
+      try {
+        if (asPercent) {
+          configurableResource.setPercentage(resourceName,
+              findPercentage(resourceValue, ""));
+        } else {
+          configurableResource.setValue(resourceName,
+              Long.parseLong(resourceValue));
+        }
+      } catch (ResourceNotFoundException ex) {
+        throw createConfigException(value, "The "
+            + "resource name, \"" + resourceName + "\" was not "
+            + "recognized. Please check the value of "
+            + YarnConfiguration.RESOURCE_TYPES + " in the Resource "
+            + "Manager's configuration files.", ex);
+      } catch (NumberFormatException ex) {
+        // This only comes from Long.parseLong()
+        throw createConfigException(value, "The "
+            + "resource values must all be integers. \"" + resourceValue
+            + "\" is not an integer.", ex);
+      } catch (AllocationConfigurationException ex) {
+        // This only comes from findPercentage()
+        throw createConfigException(value, "The "
+            + "resource values must all be percentages. \""
+            + resourceValue + "\" is either not a number or does not "
+            + "include the '%' symbol.", ex);
+      }
+    }
     return configurableResource;
   }
 
+  private static ConfigurableResource parseOldStyleResourceAsPercentage(
+          String value) throws AllocationConfigurationException {
+    return new ConfigurableResource(
+            getResourcePercentage(StringUtils.toLowerCase(value)));
+  }
+
+  private static ConfigurableResource parseOldStyleResource(String value)
+          throws AllocationConfigurationException {
+    final String lCaseValue = StringUtils.toLowerCase(value);
+    int memory = findResource(lCaseValue, "mb");
+    int vcores = findResource(lCaseValue, "vcores");
+
+    return new ConfigurableResource(
+            BuilderUtils.newResource(memory, vcores));
+  }
+
   private static double[] getResourcePercentage(
       String val) throws AllocationConfigurationException {
     int numberOfKnownResourceTypes = ResourceUtils
         .getNumberOfKnownResourceTypes();
     double[] resourcePercentage = new double[numberOfKnownResourceTypes];
     String[] strings = val.split(",");
+
     if (strings.length == 1) {
       double percentage = findPercentage(strings[0], "");
       for (int i = 0; i < numberOfKnownResourceTypes; i++) {
-        resourcePercentage[i] = percentage/100;
+        resourcePercentage[i] = percentage;
       }
     } else {
-      resourcePercentage[0] = findPercentage(val, "memory")/100;
-      resourcePercentage[1] = findPercentage(val, "cpu")/100;
+      resourcePercentage[0] = findPercentage(val, "memory");
+      resourcePercentage[1] = findPercentage(val, "cpu");
     }
+
     return resourcePercentage;
   }
 
   private static double findPercentage(String val, String units)
-    throws AllocationConfigurationException {
+      throws AllocationConfigurationException {
     final Pattern pattern =
         Pattern.compile("((\\d+)(\\.\\d*)?)\\s*%\\s*" + units);
     Matcher matcher = pattern.matcher(val);
@@ -467,7 +584,22 @@ public class FairSchedulerConfiguration extends Configuration {
             units);
       }
     }
-    return Double.parseDouble(matcher.group(1));
+    return Double.parseDouble(matcher.group(1)) / 100.0;
+  }
+
+  private static AllocationConfigurationException createConfigException(
+          String value, String message) {
+    return createConfigException(value, message, null);
+  }
+
+  private static AllocationConfigurationException createConfigException(
+      String value, String message, Throwable t) {
+    String msg = INVALID_RESOURCE_DEFINITION_PREFIX + value + ". " + message;
+    if (t != null) {
+      return new AllocationConfigurationException(msg, t);
+    } else {
+      return new AllocationConfigurationException(msg);
+    }
   }
 
   public long getUpdateInterval() {
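
For reference, a hedged sketch of the three accepted syntaxes described in the javadoc above, using values in line with the updated test below. The class name is hypothetical and only memory-mb and vcores are assumed to be registered.

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.ConfigurableResource;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;

    // Hypothetical sketch; not part of this patch.
    public class ParseResourceConfigSketch {
      public static void main(String[] args) throws Exception {
        // Old style: units required, memory and vcores only.
        ConfigurableResource oldStyle =
            FairSchedulerConfiguration.parseResourceConfigValue("5120 mb, 2 vcores");

        // Percentage style: resolved later against the live cluster resource.
        ConfigurableResource percent =
            FairSchedulerConfiguration.parseResourceConfigValue("50% memory, 75% cpu");
        Resource cluster = Resource.newInstance(10 * 1024, 4);
        Resource resolved = percent.getResource(cluster); // 5120 MB, 3 vcores

        // New style: name=value pairs, required for custom resource types.
        // Resources left unspecified default to Long.MAX_VALUE here.
        ConfigurableResource newStyle =
            FairSchedulerConfiguration.parseResourceConfigValue("vcores=2, memory-mb=5120");

        System.out.println(oldStyle.getResource() + " / " + resolved + " / "
            + newStyle.getResource());
      }
    }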

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
index d5a436e..441c34a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
@@ -134,7 +134,7 @@ public class AllocationFileQueueParser {
       if (MIN_RESOURCES.equals(field.getTagName())) {
         String text = getTrimmedTextData(field);
         ConfigurableResource val =
-            FairSchedulerConfiguration.parseResourceConfigValue(text);
+            FairSchedulerConfiguration.parseResourceConfigValue(text, 0L);
         builder.minQueueResources(queueName, val.getResource());
       } else if (MAX_RESOURCES.equals(field.getTagName())) {
         String text = getTrimmedTextData(field);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 481645b..76a5af5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -102,60 +102,145 @@ public class TestFairSchedulerConfiguration {
 
   @Test
   public void testParseResourceConfigValue() throws Exception {
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("2 vcores, 1024 mb").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024 mb, 2 vcores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("2vcores,1024mb").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024mb,2vcores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024   mb, 2    vcores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024 Mb, 2 vCores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("  1024 mb, 2 vcores  ").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("  1024.3 mb, 2.35 vcores  ").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("  1024. mb, 2. vcores  ").getResource());
-
-    Resource clusterResource = BuilderUtils.newResource(2048, 4);
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    Resource expected = BuilderUtils.newResource(5 * 1024, 2);
+    Resource clusterResource = BuilderUtils.newResource(10 * 1024, 4);
+
+    assertEquals(expected,
+        parseResourceConfigValue("2 vcores, 5120 mb").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120 mb, 2 vcores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("2vcores,5120mb").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120mb,2vcores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120mb   mb, 2    vcores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120 Mb, 2 vCores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("  5120 mb, 2 vcores  ").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("  5120.3 mb, 2.35 vcores  ").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("  5120. mb, 2. vcores  ").getResource());
+
+    assertEquals(expected,
         parseResourceConfigValue("50% memory, 50% cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50% Memory, 50% CpU").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("50%").getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 4),
+    assertEquals(BuilderUtils.newResource(5 * 1024, 4),
         parseResourceConfigValue("50% memory, 100% cpu").
         getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 4),
+    assertEquals(BuilderUtils.newResource(5 * 1024, 4),
         parseResourceConfigValue(" 100% cpu, 50% memory").
         getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 0),
+    assertEquals(BuilderUtils.newResource(5 * 1024, 0),
         parseResourceConfigValue("50% memory, 0% cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50 % memory, 50 % cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50%memory,50%cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("  50  %  memory,  50  %  cpu  ").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50.% memory, 50.% cpu").
             getResource(clusterResource));
-
-    clusterResource =  BuilderUtils.newResource(1024 * 10, 4);
     assertEquals(BuilderUtils.newResource((int)(1024 * 10 * 0.109), 2),
         parseResourceConfigValue("10.9% memory, 50.6% cpu").
             getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("50%").getResource(clusterResource));
+
+    Configuration conf = new Configuration();
+
+    conf.set(YarnConfiguration.RESOURCE_TYPES, "test1");
+    ResourceUtils.resetResourceTypes(conf);
+
+    clusterResource = BuilderUtils.newResource(10 * 1024, 4);
+    expected = BuilderUtils.newResource(5 * 1024, 2);
+    expected.setResourceValue("test1", Long.MAX_VALUE);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2, memory-mb=5120").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=5120, vcores=2").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2,memory-mb=5120").getResource());
+    assertEquals(expected, parseResourceConfigValue(" vcores = 2 , "
+            + "memory-mb = 5120 ").getResource());
+
+    expected.setResourceValue("test1", 0L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2, memory-mb=5120", 0L).getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=5120, vcores=2", 0L).getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2,memory-mb=5120", 0L).getResource());
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 2 , memory-mb = 5120 ",
+            0L).getResource());
+
+    clusterResource.setResourceValue("test1", 8L);
+    expected.setResourceValue("test1", 4L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("50%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2, memory-mb=5120, "
+            + "test1=4").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("test1=4, vcores=2, "
+            + "memory-mb=5120").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=5120, test1=4, "
+            + "vcores=2").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2,memory-mb=5120,"
+            + "test1=4").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 2 , memory-mb = 5120 , "
+            + "test1 = 4 ").getResource());
+
+    expected = BuilderUtils.newResource(4 * 1024, 3);
+    expected.setResourceValue("test1", 8L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%, "
+            + "memory-mb=40%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=40%, "
+            + "vcores=75%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%,"
+            + "memory-mb=40%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 75 % , "
+            + "memory-mb = 40 % ").getResource(clusterResource));
+
+    expected.setResourceValue("test1", 4L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%, memory-mb=40%, "
+            + "test1=50%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("test1=50%, vcores=75%, "
+            + "memory-mb=40%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=40%, test1=50%, "
+            + "vcores=75%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%,memory-mb=40%,"
+            + "test1=50%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 75 % , memory-mb = 40 % , "
+            + "test1 = 50 % ").getResource(clusterResource));
   }
   
   @Test(expected = AllocationConfigurationException.class)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index 269f5b4..b5bcbf5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -86,11 +86,11 @@ The allocation file must be in XML format. The format contains five types of ele
 
 * **Queue elements**: which represent queues. Queue elements can take an optional attribute 'type', which when set to 'parent' makes it a parent queue. This is useful when we want to create a parent queue without configuring any leaf queues. Each queue element may contain the following properties:
 
-    * **minResources**: minimum resources the queue is entitled to, in the form "X mb, Y vcores". For the single-resource fairness policy, the vcores value is ignored. If a queue's minimum share is not satisfied, it will be offered available resources before any other queue under the same parent. Under the single-resource fairness policy, a queue is considered unsatisfied if its memory usage is below its minimum memory share. Under dominant resource fairness, a queue is considered unsatisfied if its usage for its dominant resource with respect to the cluster capacity is below its minimum share for that resource. If multiple queues are unsatisfied in this situation, resources go to the queue with the smallest ratio between relevant resource usage and minimum. Note that it is possible that a queue that is below its minimum may not immediately get up to its minimum when it submits an application, because already-running jobs may be using those resources.
+    * **minResources**: minimum resources the queue is entitled to, in the form of "X mb, Y vcores" or "vcores=X, memory-mb=Y". The latter form is required when specifying resources other than memory and CPU. For the single-resource fairness policy, the vcores value is ignored. If a queue's minimum share is not satisfied, it will be offered available resources before any other queue under the same parent. Under the single-resource fairness policy, a queue is considered unsatisfied if its memory usage is below its minimum memory share. Under dominant resource fairness, a queue is considered unsatisfied if its usage for its dominant resource with respect to the cluster capacity is below its minimum share for that resource. If multiple queues are unsatisfied in this situation, resources go to the queue with the smallest ratio between relevant resource usage and its minimum. Note that it is possible for a queue that is below its minimum to not immediately get up to its minimum when an application is submitted to the queue, because already-running jobs may be using those resources.
 
-    * **maxResources**: maximum resources a queue is allocated, expressed either in absolute values (X mb, Y vcores) or as a percentage of the cluster resources (X% memory, Y% cpu). A queue will not be assigned a container that would put its aggregate usage over this limit.
+    * **maxResources**: maximum resources a queue will be allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying resources other than memory and CPU. In the last form, X and Y can either be a percentage or an integer resource value without units. In the latter case the units will be inferred from the default units configured for that resource. A queue will not be assigned a container that would put its aggregate usage over this limit.
 
-    * **maxChildResources**: maximum resources an ad hoc child queue is allocated, expressed either in absolute values (X mb, Y vcores) or as a percentage of the cluster resources (X% memory, Y% cpu). An ad hoc child queue will not be assigned a container that would put its aggregate usage over this limit.
+    * **maxChildResources**: maximum resources an ad hoc child queue will be allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying resources other than memory and CPU. In the last form, X and Y can either be a percentage or an integer resource value without units. In the latter case the units will be inferred from the default units configured for that resource. An ad hoc child queue will not be assigned a container that would put its aggregate usage over this limit.
 
     * **maxRunningApps**: limit the number of apps from the queue to run at once
 




[03/50] [abbrv] hadoop git commit: YARN-7451. Add missing tests to verify the presence of custom resources of RM apps and scheduler webservice endpoints (snemeth via rkanter)

Posted by bh...@apache.org.
YARN-7451. Add missing tests to verify the presence of custom resources of RM apps and scheduler webservice endpoints (snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99febe7f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99febe7f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99febe7f

Branch: refs/heads/HDDS-48
Commit: 99febe7fd50c31c0f5dd40fa7f376f2c1f64f8c3
Parents: 1726247
Author: Robert Kanter <rk...@apache.org>
Authored: Thu Jul 5 10:54:19 2018 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Thu Jul 5 10:54:19 2018 -0700

----------------------------------------------------------------------
 .../resourcemanager/webapp/dao/AppInfo.java     |   2 +-
 .../webapp/dao/SchedulerInfo.java               |   8 +-
 .../fair/TestFairSchedulerConfiguration.java    |   9 +-
 .../webapp/TestRMWebServices.java               |  31 ++-
 .../webapp/TestRMWebServicesApps.java           |  14 +-
 ...estRMWebServicesAppsCustomResourceTypes.java | 242 +++++++++++++++++
 .../webapp/TestRMWebServicesCapacitySched.java  |  30 +-
 .../TestRMWebServicesConfigurationMutation.java |   5 +
 .../webapp/TestRMWebServicesFairScheduler.java  |  95 +++----
 .../TestRMWebServicesSchedulerActivities.java   |   2 +-
 ...ustomResourceTypesConfigurationProvider.java | 138 ++++++++++
 .../FairSchedulerJsonVerifications.java         | 139 ++++++++++
 .../FairSchedulerXmlVerifications.java          | 153 +++++++++++
 ...ervicesFairSchedulerCustomResourceTypes.java | 271 +++++++++++++++++++
 .../webapp/helper/AppInfoJsonVerifications.java | 123 +++++++++
 .../webapp/helper/AppInfoXmlVerifications.java  | 132 +++++++++
 .../webapp/helper/BufferedClientResponse.java   |  57 ++++
 .../helper/JsonCustomResourceTypeTestcase.java  |  77 ++++++
 .../ResourceRequestsJsonVerifications.java      | 252 +++++++++++++++++
 .../ResourceRequestsXmlVerifications.java       | 215 +++++++++++++++
 .../helper/XmlCustomResourceTypeTestCase.java   | 112 ++++++++
 21 files changed, 2020 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index d47f13d..9d82bc7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -479,7 +479,7 @@ public class AppInfo {
   public int getNumNonAMContainersPreempted() {
     return numNonAMContainerPreempted;
   }
-  
+
   public int getNumAMContainersPreempted() {
     return numAMContainerPreempted;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
index 81491b1..163f707 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
@@ -41,8 +41,9 @@ public class SchedulerInfo {
   protected EnumSet<SchedulerResourceTypes> schedulingResourceTypes;
   protected int maximumClusterPriority;
 
+  // JAXB needs this
   public SchedulerInfo() {
-  } // JAXB needs this
+  }
 
   public SchedulerInfo(final ResourceManager rm) {
     ResourceScheduler rs = rm.getResourceScheduler();
@@ -74,7 +75,10 @@ public class SchedulerInfo {
   }
 
   public String getSchedulerResourceTypes() {
-    return Arrays.toString(minAllocResource.getResource().getResources());
+    if (minAllocResource != null) {
+      return Arrays.toString(minAllocResource.getResource().getResources());
+    }
+    return null;
   }
 
   public int getMaxClusterLevelAppPriority() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 76a5af5..70f83ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -48,6 +48,9 @@ import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Assert;
 import org.junit.Test;
 
+/**
+ * Tests fair scheduler configuration.
+ */
 public class TestFairSchedulerConfiguration {
 
   private static final String A_CUSTOM_RESOURCE = "a-custom-resource";
@@ -242,12 +245,12 @@ public class TestFairSchedulerConfiguration {
         parseResourceConfigValue(" vcores = 75 % , memory-mb = 40 % , "
             + "test1 = 50 % ").getResource(clusterResource));
   }
-  
+
   @Test(expected = AllocationConfigurationException.class)
   public void testNoUnits() throws Exception {
     parseResourceConfigValue("1024");
   }
-  
+
   @Test(expected = AllocationConfigurationException.class)
   public void testOnlyMemory() throws Exception {
     parseResourceConfigValue("1024mb");
@@ -257,7 +260,7 @@ public class TestFairSchedulerConfiguration {
   public void testOnlyCPU() throws Exception {
     parseResourceConfigValue("1024vcores");
   }
-  
+
   @Test(expected = AllocationConfigurationException.class)
   public void testGibberish() throws Exception {
     parseResourceConfigValue("1o24vc0res");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 0702d65..3902889 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -53,11 +53,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
-import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
-import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.*;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -76,11 +72,12 @@ import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
-import org.eclipse.jetty.server.Response;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import org.w3c.dom.NodeList;
@@ -96,6 +93,8 @@ import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
 public class TestRMWebServices extends JerseyTestBase {
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestRMWebServices.class);
 
   private static MockRM rm;
 
@@ -472,19 +471,19 @@ public class TestRMWebServices extends JerseyTestBase {
     QueueMetrics metrics = rs.getRootQueueMetrics();
     ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
 
-    long totalMBExpect = 
+    long totalMBExpect =
         metrics.getAvailableMB() + metrics.getAllocatedMB();
-    long totalVirtualCoresExpect = 
+    long totalVirtualCoresExpect =
         metrics.getAvailableVirtualCores() + metrics.getAllocatedVirtualCores();
-    assertEquals("appsSubmitted doesn't match", 
+    assertEquals("appsSubmitted doesn't match",
         metrics.getAppsSubmitted(), submittedApps);
-    assertEquals("appsCompleted doesn't match", 
+    assertEquals("appsCompleted doesn't match",
         metrics.getAppsCompleted(), completedApps);
     assertEquals("reservedMB doesn't match",
         metrics.getReservedMB(), reservedMB);
-    assertEquals("availableMB doesn't match", 
+    assertEquals("availableMB doesn't match",
         metrics.getAvailableMB(), availableMB);
-    assertEquals("allocatedMB doesn't match", 
+    assertEquals("allocatedMB doesn't match",
         metrics.getAllocatedMB(), allocMB);
     assertEquals("reservedVirtualCores doesn't match",
         metrics.getReservedVirtualCores(), reservedVirtualCores);
@@ -597,11 +596,13 @@ public class TestRMWebServices extends JerseyTestBase {
 
   public void verifyClusterSchedulerFifo(JSONObject json) throws JSONException,
       Exception {
-    assertEquals("incorrect number of elements", 1, json.length());
+    assertEquals("incorrect number of elements in: " + json, 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
-    assertEquals("incorrect number of elements", 1, info.length());
+    assertEquals("incorrect number of elements in: " + info, 1, info.length());
     info = info.getJSONObject("schedulerInfo");
-    assertEquals("incorrect number of elements", 11, info.length());
+
+    LOG.debug("schedulerInfo: {}", info);
+    assertEquals("incorrect number of elements in: " + info, 11, info.length());
 
     verifyClusterSchedulerFifoGeneric(info.getString("type"),
         info.getString("qstate"), (float) info.getDouble("capacity"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index 6c6f400..15f94e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -79,7 +79,7 @@ import com.sun.jersey.test.framework.WebAppDescriptor;
 public class TestRMWebServicesApps extends JerseyTestBase {
 
   private static MockRM rm;
-  
+
   private static final int CONTAINER_MB = 1024;
 
   private static class WebServletModule extends ServletModule {
@@ -324,7 +324,7 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     assertEquals("incorrect number of elements", 1, apps.length());
     array = apps.getJSONArray("app");
     assertEquals("incorrect number of elements", 2, array.length());
-    assertTrue("both app states of ACCEPTED and KILLED are not present", 
+    assertTrue("both app states of ACCEPTED and KILLED are not present",
         (array.getJSONObject(0).getString("state").equals("ACCEPTED") &&
         array.getJSONObject(1).getString("state").equals("KILLED")) ||
         (array.getJSONObject(0).getString("state").equals("KILLED") &&
@@ -375,12 +375,12 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     assertEquals("incorrect number of elements", 1, apps.length());
     array = apps.getJSONArray("app");
     assertEquals("incorrect number of elements", 2, array.length());
-    assertTrue("both app states of ACCEPTED and KILLED are not present", 
+    assertTrue("both app states of ACCEPTED and KILLED are not present",
         (array.getJSONObject(0).getString("state").equals("ACCEPTED") &&
         array.getJSONObject(1).getString("state").equals("KILLED")) ||
         (array.getJSONObject(0).getString("state").equals("KILLED") &&
         array.getJSONObject(1).getString("state").equals("ACCEPTED")));
-    
+
     rm.stop();
   }
 
@@ -511,7 +511,8 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     WebResource r = resource();
 
     ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("apps").queryParam("finalStatus", FinalApplicationStatus.UNDEFINED.toString())
+        .path("apps").queryParam("finalStatus",
+                    FinalApplicationStatus.UNDEFINED.toString())
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
@@ -1804,7 +1805,8 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     int numAttempt = 1;
     while (true) {
       // fail the AM by sending CONTAINER_FINISHED event without registering.
-      amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
+      amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1,
+              ContainerState.COMPLETE);
       rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.FAILED);
       if (numAttempt == maxAppAttempts) {
         rm.waitForState(app1.getApplicationId(), RMAppState.FAILED);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
new file mode 100644
index 0000000..83e0056
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
@@ -0,0 +1,242 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler.CustomResourceTypesConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.AppInfoJsonVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.AppInfoXmlVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.BufferedClientResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.JsonCustomResourceTypeTestcase;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.ResourceRequestsJsonVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.ResourceRequestsXmlVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.Before;
+import org.junit.Test;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+import javax.ws.rs.core.MediaType;
+import java.util.ArrayList;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This test verifies that custom resource types are correctly serialized to
+ * XML and JSON when an HTTP GET request is sent to ws/v1/cluster/apps.
+ */
+public class TestRMWebServicesAppsCustomResourceTypes extends JerseyTestBase {
+
+  private static MockRM rm;
+  private static final int CONTAINER_MB = 1024;
+
+  private static class WebServletModule extends ServletModule {
+    @Override
+    protected void configureServlets() {
+      bind(JAXBContextResolver.class);
+      bind(RMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      Configuration conf = new Configuration();
+      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
+          YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
+      conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
+          ResourceScheduler.class);
+      initResourceTypes(conf);
+      rm = new MockRM(conf);
+      bind(ResourceManager.class).toInstance(rm);
+      serve("/*").with(GuiceContainer.class);
+    }
+
+    private void initResourceTypes(Configuration conf) {
+      conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
+          CustomResourceTypesConfigurationProvider.class.getName());
+      ResourceUtils.resetResourceTypes(conf);
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    createInjectorForWebServletModule();
+  }
+
+  private void createInjectorForWebServletModule() {
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
+  }
+
+  public TestRMWebServicesAppsCustomResourceTypes() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
+            .contextListenerClass(GuiceServletConfig.class)
+            .filterClass(com.google.inject.servlet.GuiceFilter.class)
+            .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testRunningAppXml() throws Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
+    am1.allocate("*", 2048, 1, new ArrayList<>());
+    amNodeManager.nodeHeartbeat(true);
+
+    WebResource r = resource();
+    WebResource path = r.path("ws").path("v1").path("cluster").path("apps");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+    XmlCustomResourceTypeTestCase testCase =
+            new XmlCustomResourceTypeTestCase(path,
+                    new BufferedClientResponse(response));
+    testCase.verify(document -> {
+      NodeList apps = document.getElementsByTagName("apps");
+      assertEquals("incorrect number of apps elements", 1, apps.getLength());
+
+      NodeList appArray = ((Element)(apps.item(0)))
+              .getElementsByTagName("app");
+      assertEquals("incorrect number of app elements", 1, appArray.getLength());
+
+      verifyAppsXML(appArray, app1);
+    });
+
+    rm.stop();
+  }
+
+  @Test
+  public void testRunningAppJson() throws Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
+    am1.allocate("*", 2048, 1, new ArrayList<>());
+    amNodeManager.nodeHeartbeat(true);
+
+    WebResource r = resource();
+    WebResource path = r.path("ws").path("v1").path("cluster").path("apps");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+
+    JsonCustomResourceTypeTestcase testCase =
+        new JsonCustomResourceTypeTestcase(path,
+            new BufferedClientResponse(response));
+    testCase.verify(json -> {
+      try {
+        assertEquals("incorrect number of apps elements", 1, json.length());
+        JSONObject apps = json.getJSONObject("apps");
+        assertEquals("incorrect number of app elements", 1, apps.length());
+        JSONArray array = apps.getJSONArray("app");
+        assertEquals("incorrect count of app", 1, array.length());
+
+        verifyAppInfoJson(array.getJSONObject(0), app1);
+      } catch (JSONException e) {
+        throw new RuntimeException(e);
+      }
+    });
+
+    rm.stop();
+  }
+
+  private void verifyAppsXML(NodeList appArray, RMApp app) {
+    for (int i = 0; i < appArray.getLength(); i++) {
+      Element element = (Element) appArray.item(i);
+      AppInfoXmlVerifications.verify(element, app);
+
+      NodeList resourceRequests =
+          element.getElementsByTagName("resourceRequests");
+      assertEquals(1, resourceRequests.getLength());
+      Node resourceRequest = resourceRequests.item(0);
+      ResourceRequest rr =
+          ((AbstractYarnScheduler) rm.getRMContext().getScheduler())
+              .getApplicationAttempt(
+                  app.getCurrentAppAttempt().getAppAttemptId())
+              .getAppSchedulingInfo().getAllResourceRequests().get(0);
+      ResourceRequestsXmlVerifications.verifyWithCustomResourceTypes(
+              (Element) resourceRequest, rr,
+          CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+    }
+  }
+
+  private void verifyAppInfoJson(JSONObject info, RMApp app) throws
+          JSONException {
+    int expectedNumberOfElements = getExpectedNumberOfElements(app);
+
+    assertEquals("incorrect number of elements", expectedNumberOfElements,
+        info.length());
+
+    AppInfoJsonVerifications.verify(info, app);
+
+    JSONArray resourceRequests = info.getJSONArray("resourceRequests");
+    JSONObject requestInfo = resourceRequests.getJSONObject(0);
+    ResourceRequest rr =
+        ((AbstractYarnScheduler) rm.getRMContext().getScheduler())
+            .getApplicationAttempt(app.getCurrentAppAttempt().getAppAttemptId())
+            .getAppSchedulingInfo().getAllResourceRequests().get(0);
+
+    ResourceRequestsJsonVerifications.verifyWithCustomResourceTypes(
+            requestInfo, rr,
+            CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  private int getExpectedNumberOfElements(RMApp app) {
+    int expectedNumberOfElements = 40 + 2; // 2 -> resourceRequests
+    if (app.getApplicationSubmissionContext()
+        .getNodeLabelExpression() != null) {
+      expectedNumberOfElements++;
+    }
+
+    if (app.getAMResourceRequests().get(0).getNodeLabelExpression() != null) {
+      expectedNumberOfElements++;
+    }
+
+    if (AppInfo
+        .getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()) != null) {
+      expectedNumberOfElements++;
+    }
+    return expectedNumberOfElements;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
index e37f76f..46d0a66 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
@@ -146,7 +146,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     config.setUserLimitFactor(B2, 100.0f);
     config.setCapacity(B3, 0.5f);
     config.setUserLimitFactor(B3, 100.0f);
-    
+
     config.setQueues(A1, new String[] {"a1a", "a1b"});
     final String A1A = A1 + ".a1a";
     config.setCapacity(A1A, 85);
@@ -254,7 +254,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     }
   }
 
-  public void verifySubQueueXML(Element qElem, String q, 
+  public void verifySubQueueXML(Element qElem, String q,
       float parentAbsCapacity, float parentAbsMaxCapacity)
       throws Exception {
     NodeList children = qElem.getChildNodes();
@@ -317,30 +317,34 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
 
   private void verifyClusterScheduler(JSONObject json) throws JSONException,
       Exception {
-    assertEquals("incorrect number of elements", 1, json.length());
+    assertEquals("incorrect number of elements in: " + json, 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
-    assertEquals("incorrect number of elements", 1, info.length());
+    assertEquals("incorrect number of elements in: " + info, 1, info.length());
     info = info.getJSONObject("schedulerInfo");
-    assertEquals("incorrect number of elements", 8, info.length());
+    assertEquals("incorrect number of elements in: " + info, 8, info.length());
     verifyClusterSchedulerGeneric(info.getString("type"),
         (float) info.getDouble("usedCapacity"),
         (float) info.getDouble("capacity"),
         (float) info.getDouble("maxCapacity"), info.getString("queueName"));
     JSONObject health = info.getJSONObject("health");
     assertNotNull(health);
-    assertEquals("incorrect number of elements", 3, health.length());
+    assertEquals("incorrect number of elements in: " + health, 3,
+        health.length());
     JSONArray operationsInfo = health.getJSONArray("operationsInfo");
-    assertEquals("incorrect number of elements", 4, operationsInfo.length());
+    assertEquals("incorrect number of elements in: " + health, 4,
+        operationsInfo.length());
     JSONArray lastRunDetails = health.getJSONArray("lastRunDetails");
-    assertEquals("incorrect number of elements", 3, lastRunDetails.length());
+    assertEquals("incorrect number of elements in: " + health, 3,
+        lastRunDetails.length());
 
     JSONArray arr = info.getJSONObject("queues").getJSONArray("queue");
-    assertEquals("incorrect number of elements", 2, arr.length());
+    assertEquals("incorrect number of elements in: " + arr, 2, arr.length());
 
     // test subqueues
     for (int i = 0; i < arr.length(); i++) {
       JSONObject obj = arr.getJSONObject(i);
-      String q = CapacitySchedulerConfiguration.ROOT + "." + obj.getString("queueName");
+      String q = CapacitySchedulerConfiguration.ROOT + "." +
+              obj.getString("queueName");
       verifySubQueue(obj, q, 100, 100);
     }
   }
@@ -355,7 +359,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     assertTrue("queueName doesn't match", "root".matches(queueName));
   }
 
-  private void verifySubQueue(JSONObject info, String q, 
+  private void verifySubQueue(JSONObject info, String q,
       float parentAbsCapacity, float parentAbsMaxCapacity)
       throws JSONException, Exception {
     int numExpectedElements = 20;
@@ -464,7 +468,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
         csConf.getUserLimitFactor(q), info.userLimitFactor, 1e-3f);
   }
 
-  //Return a child Node of node with the tagname or null if none exists 
+  //Return a child Node of node with the tagname or null if none exists
   private Node getChildNodeByName(Node node, String tagname) {
     NodeList nodeList = node.getChildNodes();
     for (int i=0; i < nodeList.getLength(); ++i) {
@@ -514,7 +518,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
           for (int j=0; j<users.getLength(); ++j) {
             Node user = users.item(j);
             String username = getChildNodeByName(user, "username")
-              .getTextContent(); 
+                .getTextContent();
             assertTrue(username.equals("user1") || username.equals("user2"));
             //Should be a parsable integer
             Integer.parseInt(getChildNodeByName(getChildNodeByName(user,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
index 3d28f12..99b5648 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -42,6 +42,8 @@ import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response.Status;
@@ -59,6 +61,8 @@ import static org.junit.Assert.assertNull;
  * Test scheduler configuration mutation via REST API.
  */
 public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
+  private static final Logger LOG = LoggerFactory
+          .getLogger(TestRMWebServicesConfigurationMutation.class);
 
   private static final File CONF_FILE = new File(new File("target",
       "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE);
@@ -396,6 +400,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
             .entity(YarnWebServiceUtils.toJson(updateInfo,
                 SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
+    LOG.debug("Response headers: " + response.getHeaders());
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
     CapacitySchedulerConfiguration newCSConf = cs.getConfiguration();
     assertEquals(0.2f, newCSConf

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
index e77785b..58c72ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -16,13 +16,14 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.resourcemanager.webapp;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import javax.ws.rs.core.MediaType;
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
 
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
@@ -30,6 +31,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager;
+
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
 import org.apache.hadoop.yarn.webapp.JerseyTestBase;
@@ -38,18 +42,18 @@ import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Before;
 import org.junit.Test;
+import javax.ws.rs.core.MediaType;
 
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.WebAppDescriptor;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
+/**
+ * Tests RM Webservices fair scheduler resources.
+ */
 public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   private static MockRM rm;
   private static YarnConfiguration conf;
-  
+
   private static class WebServletModule extends ServletModule {
     @Override
     protected void configureServlets() {
@@ -58,7 +62,7 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
       bind(GenericExceptionHandler.class);
       conf = new YarnConfiguration();
       conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
-        ResourceScheduler.class);
+          ResourceScheduler.class);
       rm = new MockRM(conf);
       bind(ResourceManager.class).toInstance(rm);
       serve("/*").with(GuiceContainer.class);
@@ -66,32 +70,32 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   }
 
   static {
-    GuiceServletConfig.setInjector(
-        Guice.createInjector(new WebServletModule()));
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
   }
 
   @Before
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    GuiceServletConfig.setInjector(
-        Guice.createInjector(new WebServletModule()));
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
   }
 
   public TestRMWebServicesFairScheduler() {
     super(new WebAppDescriptor.Builder(
         "org.apache.hadoop.yarn.server.resourcemanager.webapp")
-        .contextListenerClass(GuiceServletConfig.class)
-        .filterClass(com.google.inject.servlet.GuiceFilter.class)
-        .contextPath("jersey-guice-filter").servletPath("/").build());
+            .contextListenerClass(GuiceServletConfig.class)
+            .filterClass(com.google.inject.servlet.GuiceFilter.class)
+            .contextPath("jersey-guice-filter").servletPath("/").build());
   }
-  
+
   @Test
-  public void testClusterScheduler() throws JSONException, Exception {
+  public void testClusterScheduler() throws JSONException {
     WebResource r = resource();
-    ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("scheduler").accept(MediaType.APPLICATION_JSON)
-        .get(ClientResponse.class);
+    ClientResponse response =
+        r.path("ws").path("v1").path("cluster").path("scheduler")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
@@ -99,52 +103,51 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   }
 
   @Test
-  public void testClusterSchedulerSlash() throws JSONException, Exception {
+  public void testClusterSchedulerSlash() throws JSONException {
     WebResource r = resource();
-    ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("scheduler/").accept(MediaType.APPLICATION_JSON)
-        .get(ClientResponse.class);
+    ClientResponse response =
+        r.path("ws").path("v1").path("cluster").path("scheduler/")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
     verifyClusterScheduler(json);
   }
-  
+
   @Test
-  public void testClusterSchedulerWithSubQueues() throws JSONException,
-      Exception {
-    FairScheduler scheduler = (FairScheduler)rm.getResourceScheduler();
+  public void testClusterSchedulerWithSubQueues()
+      throws JSONException {
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
     QueueManager queueManager = scheduler.getQueueManager();
     // create LeafQueue
     queueManager.getLeafQueue("root.q.subqueue1", true);
     queueManager.getLeafQueue("root.q.subqueue2", true);
 
     WebResource r = resource();
-    ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("scheduler").accept(MediaType.APPLICATION_JSON)
-        .get(ClientResponse.class);
+    ClientResponse response =
+        r.path("ws").path("v1").path("cluster").path("scheduler")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
     JSONArray subQueueInfo = json.getJSONObject("scheduler")
         .getJSONObject("schedulerInfo").getJSONObject("rootQueue")
-        .getJSONObject("childQueues").getJSONArray("queue")
-        .getJSONObject(1).getJSONObject("childQueues").getJSONArray("queue");
+        .getJSONObject("childQueues").getJSONArray("queue").getJSONObject(1)
+        .getJSONObject("childQueues").getJSONArray("queue");
     // subQueueInfo is consist of subqueue1 and subqueue2 info
     assertEquals(2, subQueueInfo.length());
 
     // Verify 'childQueues' field is omitted from FairSchedulerLeafQueueInfo.
     try {
       subQueueInfo.getJSONObject(1).getJSONObject("childQueues");
-      fail("FairSchedulerQueueInfo should omit field 'childQueues'" +
-           "if child queue is empty.");
+      fail("FairSchedulerQueueInfo should omit field 'childQueues'"
+          + "if child queue is empty.");
     } catch (JSONException je) {
       assertEquals("JSONObject[\"childQueues\"] not found.", je.getMessage());
     }
   }
 
-  private void verifyClusterScheduler(JSONObject json) throws JSONException,
-      Exception {
+  private void verifyClusterScheduler(JSONObject json) throws JSONException {
     assertEquals("incorrect number of elements", 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
     assertEquals("incorrect number of elements", 1, info.length());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
index 1e61186..40cf483 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
@@ -457,7 +457,7 @@ public class TestRMWebServicesSchedulerActivities
       if (object.getClass() == JSONObject.class) {
         assertEquals("Number of allocations is wrong", 1, realValue);
       } else if (object.getClass() == JSONArray.class) {
-        assertEquals("Number of allocations is wrong",
+        assertEquals("Number of allocations is wrong in: " + object,
             ((JSONArray) object).length(), realValue);
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
new file mode 100644
index 0000000..bb1fce0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.LocalConfigurationProvider;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static java.util.stream.Collectors.toList;
+
+/**
+ * This class can generate an XML configuration file of custom resource types.
+ * See createInitialResourceTypes for the default values. Every custom
+ * resource type name is prefixed with CUSTOM_RESOURCE_PREFIX. Please use the
+ * getConfigurationInputStream method to get an InputStream of the XML. If you
+ * want a different number of resource types in your tests, please see the
+ * usages of this class in this test class:
+ * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}
+ *
+ */
+public class CustomResourceTypesConfigurationProvider
+    extends LocalConfigurationProvider {
+
+  private static class CustomResourceTypes {
+    private int count;
+    private String xml;
+
+    CustomResourceTypes(String xml, int count) {
+      this.xml = xml;
+      this.count = count;
+    }
+
+    public int getCount() {
+      return count;
+    }
+
+    public String getXml() {
+      return xml;
+    }
+  }
+
+  private static final String CUSTOM_RESOURCE_PREFIX = "customResource-";
+
+  private static CustomResourceTypes customResourceTypes =
+      createInitialResourceTypes();
+
+  private static CustomResourceTypes createInitialResourceTypes() {
+    return createCustomResourceTypes(2);
+  }
+
+  private static CustomResourceTypes createCustomResourceTypes(int count) {
+    List<String> resourceTypeNames = generateResourceTypeNames(count);
+
+    List<String> resourceUnitXmlElements = IntStream.range(0, count)
+            .boxed()
+            .map(i -> getResourceUnitsXml(resourceTypeNames.get(i)))
+            .collect(toList());
+
+    StringBuilder sb = new StringBuilder("<configuration>\n");
+    sb.append(getResourceTypesXml(resourceTypeNames));
+
+    for (String resourceUnitXml : resourceUnitXmlElements) {
+      sb.append(resourceUnitXml);
+
+    }
+    sb.append("</configuration>");
+
+    return new CustomResourceTypes(sb.toString(), count);
+  }
+
+  private static List<String> generateResourceTypeNames(int count) {
+    return IntStream.range(0, count)
+            .boxed()
+            .map(i -> CUSTOM_RESOURCE_PREFIX + i)
+            .collect(toList());
+  }
+
+  private static String getResourceUnitsXml(String resource) {
+    return "<property>\n" + "<name>yarn.resource-types." + resource
+        + ".units</name>\n" + "<value>k</value>\n" + "</property>\n";
+  }
+
+  private static String getResourceTypesXml(List<String> resources) {
+    final String resourceTypes = makeCommaSeparatedString(resources);
+
+    return "<property>\n" + "<name>yarn.resource-types</name>\n" + "<value>"
+        + resourceTypes + "</value>\n" + "</property>\n";
+  }
+
+  private static String makeCommaSeparatedString(List<String> resources) {
+    return resources.stream().collect(Collectors.joining(","));
+  }
+
+  @Override
+  public InputStream getConfigurationInputStream(Configuration bootstrapConf,
+      String name) throws YarnException, IOException {
+    if (YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE.equals(name)) {
+      return new ByteArrayInputStream(
+          customResourceTypes.getXml().getBytes());
+    } else {
+      return super.getConfigurationInputStream(bootstrapConf, name);
+    }
+  }
+
+  public static void reset() {
+    customResourceTypes = createInitialResourceTypes();
+  }
+
+  public static void setNumberOfResourceTypes(int count) {
+    customResourceTypes = createCustomResourceTypes(count);
+  }
+
+  public static List<String> getCustomResourceTypes() {
+    return generateResourceTypeNames(customResourceTypes.getCount());
+  }
+}
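
For context, a minimal usage sketch (not part of this patch) of how a test
wires in the provider above. By default the generated resource-types XML
declares two types, customResource-0 and customResource-1, each with unit "k";
all of the calls shown here also appear in the new tests elsewhere in this
commit.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.util.resource.ResourceUtils;

    // Sketch only: point the RM at the provider so resource-types.xml is
    // served from the generated in-memory XML, then force a re-read.
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
        CustomResourceTypesConfigurationProvider.class.getName());
    ResourceUtils.resetResourceTypes(conf);

    // Tests that need a different number of types regenerate the backing XML:
    CustomResourceTypesConfigurationProvider.setNumberOfResourceTypes(11);
    // ...and restore the default two types when done:
    CustomResourceTypesConfigurationProvider.reset();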

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
new file mode 100644
index 0000000..924411a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This test helper class is primarily used by
+ * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}.
+ */
+public class FairSchedulerJsonVerifications {
+
+  private static final Set<String> RESOURCE_FIELDS =
+      Sets.newHashSet("minResources", "amUsedResources", "amMaxResources",
+          "fairResources", "clusterResources", "reservedResources",
+              "maxResources", "usedResources", "steadyFairResources",
+              "demandResources");
+  private final Set<String> customResourceTypes;
+
+  FairSchedulerJsonVerifications(List<String> customResourceTypes) {
+    this.customResourceTypes = Sets.newHashSet(customResourceTypes);
+  }
+
+  public void verify(JSONObject jsonObject) {
+    try {
+      verifyResourcesContainDefaultResourceTypes(jsonObject, RESOURCE_FIELDS);
+      verifyResourcesContainCustomResourceTypes(jsonObject, RESOURCE_FIELDS);
+    } catch (JSONException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void verifyResourcesContainDefaultResourceTypes(JSONObject queue,
+      Set<String> resourceCategories) throws JSONException {
+    for (String resourceCategory : resourceCategories) {
+      boolean hasResourceCategory = queue.has(resourceCategory);
+      assertTrue("Queue " + queue + " does not have resource category key: "
+          + resourceCategory, hasResourceCategory);
+      verifyResourceContainsDefaultResourceTypes(
+          queue.getJSONObject(resourceCategory));
+    }
+  }
+
+  private void verifyResourceContainsDefaultResourceTypes(
+      JSONObject jsonObject) {
+    Object memory = jsonObject.opt("memory");
+    Object vCores = jsonObject.opt("vCores");
+
+    assertNotNull("Key 'memory' not found in: " + jsonObject, memory);
+    assertNotNull("Key 'vCores' not found in: " + jsonObject, vCores);
+  }
+
+  private void verifyResourcesContainCustomResourceTypes(JSONObject queue,
+      Set<String> resourceCategories) throws JSONException {
+    for (String resourceCategory : resourceCategories) {
+      assertTrue("Queue " + queue + " does not have resource category key: "
+          + resourceCategory, queue.has(resourceCategory));
+      verifyResourceContainsAllCustomResourceTypes(
+          queue.getJSONObject(resourceCategory));
+    }
+  }
+
+  private void verifyResourceContainsAllCustomResourceTypes(
+      JSONObject resourceCategory) throws JSONException {
+    assertTrue("resourceCategory does not have resourceInformations: "
+        + resourceCategory, resourceCategory.has("resourceInformations"));
+
+    JSONObject resourceInformations =
+        resourceCategory.getJSONObject("resourceInformations");
+    assertTrue(
+        "resourceInformations does not have resourceInformation object: "
+            + resourceInformations,
+        resourceInformations.has("resourceInformation"));
+    JSONArray customResources =
+        resourceInformations.getJSONArray("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        customResourceTypes.size(), customResources.length() - 2);
+
+    for (int i = 0; i < customResources.length(); i++) {
+      JSONObject customResource = customResources.getJSONObject(i);
+      assertTrue("Resource type does not have name field: " + customResource,
+          customResource.has("name"));
+      assertTrue("Resource type does not have name resourceType field: "
+          + customResource, customResource.has("resourceType"));
+      assertTrue(
+          "Resource type does not have name units field: " + customResource,
+          customResource.has("units"));
+      assertTrue(
+          "Resource type does not have name value field: " + customResource,
+          customResource.has("value"));
+
+      String name = customResource.getString("name");
+      String unit = customResource.getString("units");
+      String resourceType = customResource.getString("resourceType");
+      Long value = customResource.getLong("value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          customResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Custom resource value " + value + " is null!", value);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
new file mode 100644
index 0000000..63ae7b7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase.toXml;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This test helper class is primarily used by
+ * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}.
+ */
+public class FairSchedulerXmlVerifications {
+
+  private static final Set<String> RESOURCE_FIELDS = Sets.newHashSet(
+      "minResources", "amUsedResources", "amMaxResources", "fairResources",
+      "clusterResources", "reservedResources", "maxResources", "usedResources",
+      "steadyFairResources", "demandResources");
+  private final Set<String> customResourceTypes;
+
+  FairSchedulerXmlVerifications(List<String> customResourceTypes) {
+    this.customResourceTypes = Sets.newHashSet(customResourceTypes);
+  }
+
+  public void verify(Element element) {
+    verifyResourcesContainDefaultResourceTypes(element, RESOURCE_FIELDS);
+    verifyResourcesContainCustomResourceTypes(element, RESOURCE_FIELDS);
+  }
+
+  private void verifyResourcesContainDefaultResourceTypes(Element queue,
+      Set<String> resourceCategories) {
+    for (String resourceCategory : resourceCategories) {
+      boolean hasResourceCategory = hasChild(queue, resourceCategory);
+      assertTrue("Queue " + queue + " does not have resource category key: "
+          + resourceCategory, hasResourceCategory);
+      verifyResourceContainsDefaultResourceTypes(
+              (Element) queue.getElementsByTagName(resourceCategory).item(0));
+    }
+  }
+
+  private void verifyResourceContainsDefaultResourceTypes(
+      Element element) {
+    Object memory = opt(element, "memory");
+    Object vCores = opt(element, "vCores");
+
+    assertNotNull("Key 'memory' not found in: " + element, memory);
+    assertNotNull("Key 'vCores' not found in: " + element, vCores);
+  }
+
+  private void verifyResourcesContainCustomResourceTypes(Element queue,
+      Set<String> resourceCategories) {
+    for (String resourceCategory : resourceCategories) {
+      assertTrue("Queue " + queue + " does not have key for resourceCategory: "
+          + resourceCategory, hasChild(queue, resourceCategory));
+      verifyResourceContainsCustomResourceTypes(
+              (Element) queue.getElementsByTagName(resourceCategory).item(0));
+    }
+  }
+
+  private void verifyResourceContainsCustomResourceTypes(
+      Element resourceCategory) {
+    assertEquals(
+        toXml(resourceCategory)
+            + " should have only one resourceInformations child!",
+        1, resourceCategory.getElementsByTagName("resourceInformations")
+            .getLength());
+    Element resourceInformations = (Element) resourceCategory
+        .getElementsByTagName("resourceInformations").item(0);
+
+    NodeList customResources =
+        resourceInformations.getElementsByTagName("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        customResourceTypes.size(), customResources.getLength() - 2);
+
+    for (int i = 0; i < customResources.getLength(); i++) {
+      Element customResource = (Element) customResources.item(i);
+      String name = getXmlString(customResource, "name");
+      String unit = getXmlString(customResource, "units");
+      String resourceType = getXmlString(customResource, "resourceType");
+      Long value = getXmlLong(customResource, "value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          customResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Resource value should not be null for resource type "
+          + resourceType + ", listing xml contents: " + toXml(customResource),
+          value);
+    }
+  }
+
+  private Object opt(Node node, String child) {
+    NodeList nodes = getElementsByTagNameInternal(node, child);
+    if (nodes.getLength() > 0) {
+      return nodes.item(0);
+    }
+
+    return null;
+  }
+
+  private boolean hasChild(Node node, String child) {
+    return getElementsByTagNameInternal(node, child).getLength() > 0;
+  }
+
+  private NodeList getElementsByTagNameInternal(Node node, String child) {
+    if (node instanceof Element) {
+      return ((Element) node).getElementsByTagName(child);
+    } else if (node instanceof Document) {
+      return ((Document) node).getElementsByTagName(child);
+    } else {
+      throw new IllegalStateException("Unknown type of wrappedObject: " + node
+          + ", type: " + node.getClass());
+    }
+  }
+}
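
As a rough usage sketch (again not part of the patch), the two verification
helpers above are meant to be driven once per queue taken from the
ws/v1/cluster/scheduler response; both check that every resource field
(minResources, maxResources, usedResources, and so on) lists memory, vCores
and each custom type under resourceInformations/resourceInformation. The
parameters queueJson and queueElement below are hypothetical placeholders.

    import java.util.List;
    import org.codehaus.jettison.json.JSONObject;
    import org.w3c.dom.Element;

    // Hypothetical helper, assumed to live in the same package as the two
    // verification classes (their constructors are package-private).
    // queueJson / queueElement stand for a single queue taken from the JSON
    // and XML scheduler responses respectively.
    static void verifyQueue(JSONObject queueJson, Element queueElement) {
      List<String> types =
          CustomResourceTypesConfigurationProvider.getCustomResourceTypes();
      new FairSchedulerJsonVerifications(types).verify(queueJson);
      new FairSchedulerXmlVerifications(types).verify(queueElement);
    }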

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
new file mode 100644
index 0000000..de4d5a1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.*;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.w3c.dom.Element;
+import javax.ws.rs.core.MediaType;
+import java.lang.reflect.Method;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This class tests response representations of queue resources when custom
+ * resource types are explicitly configured, with the help of
+ * {@link CustomResourceTypesConfigurationProvider}.
+ */
+public class TestRMWebServicesFairSchedulerCustomResourceTypes
+    extends JerseyTestBase {
+  private static MockRM rm;
+  private static YarnConfiguration conf;
+
+  private static class WebServletModule extends ServletModule {
+    @Override
+    protected void configureServlets() {
+      bind(JAXBContextResolver.class);
+      bind(RMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      conf = new YarnConfiguration();
+      conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
+          ResourceScheduler.class);
+      initResourceTypes(conf);
+      rm = new MockRM(conf);
+      bind(ResourceManager.class).toInstance(rm);
+      serve("/*").with(GuiceContainer.class);
+    }
+
+    private void initResourceTypes(YarnConfiguration conf) {
+      conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
+          CustomResourceTypesConfigurationProvider.class.getName());
+      ResourceUtils.resetResourceTypes(conf);
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    createInjectorForWebServletModule();
+  }
+
+  @After
+  public void tearDown() {
+    ResourceUtils.resetResourceTypes(new Configuration());
+  }
+
+  private void createInjectorForWebServletModule() {
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
+  }
+
+  @After
+  public void teardown() {
+    CustomResourceTypesConfigurationProvider.reset();
+  }
+
+  public TestRMWebServicesFairSchedulerCustomResourceTypes() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
+            .contextListenerClass(GuiceServletConfig.class)
+            .filterClass(com.google.inject.servlet.GuiceFilter.class)
+            .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testClusterSchedulerWithCustomResourceTypesJson() {
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+
+    verifyJsonResponse(path, response,
+            CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  @Test
+  public void testClusterSchedulerWithCustomResourceTypesXml() {
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+    verifyXmlResponse(path, response,
+        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  @Test
+  public void testClusterSchedulerWithElevenCustomResourceTypesXml() {
+    CustomResourceTypesConfigurationProvider.setNumberOfResourceTypes(11);
+    createInjectorForWebServletModule();
+
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+    verifyXmlResponse(path, response,
+        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  @Test
+  public void testClusterSchedulerElevenWithCustomResourceTypesJson() {
+    CustomResourceTypesConfigurationProvider.setNumberOfResourceTypes(11);
+    createInjectorForWebServletModule();
+
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+
+    verifyJsonResponse(path, response,
+        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  private void verifyJsonResponse(WebResource path, ClientResponse response,
+      List<String> customResourceTypes) {
+    JsonCustomResourceTypeTestcase testCase =
+        new JsonCustomResourceTypeTestcase(path,
+            new BufferedClientResponse(response));
+    testCase.verify(json -> {
+      try {
+        JSONArray queues = json.getJSONObject("scheduler")
+            .getJSONObject("schedulerInfo").getJSONObject("rootQueue")
+            .getJSONObject("childQueues").getJSONArray("queue");
+
+        // childQueueInfo consists of subqueue1 and subqueue2 info
+        assertEquals(2, queues.length());
+        JSONObject firstChildQueue = queues.getJSONObject(0);
+        new FairSchedulerJsonVerifications(customResourceTypes)
+            .verify(firstChildQueue);
+      } catch (JSONException e) {
+        throw new RuntimeException(e);
+      }
+    });
+  }
+
+  private void verifyXmlResponse(WebResource path, ClientResponse response,
+          List<String> customResourceTypes) {
+    XmlCustomResourceTypeTestCase testCase = new XmlCustomResourceTypeTestCase(
+        path, new BufferedClientResponse(response));
+
+    testCase.verify(xml -> {
+      Element scheduler =
+          (Element) xml.getElementsByTagName("scheduler").item(0);
+      Element schedulerInfo =
+          (Element) scheduler.getElementsByTagName("schedulerInfo").item(0);
+      Element rootQueue =
+          (Element) schedulerInfo.getElementsByTagName("rootQueue").item(0);
+
+      Element childQueues =
+          (Element) rootQueue.getElementsByTagName("childQueues").item(0);
+      Element queue =
+          (Element) childQueues.getElementsByTagName("queue").item(0);
+      new FairSchedulerXmlVerifications(customResourceTypes).verify(queue);
+    });
+  }
+
+  private void incrementUsedResourcesOnQueue(final FSLeafQueue queue,
+      final long value) {
+    try {
+      Method incUsedResourceMethod = queue.getClass().getSuperclass()
+          .getDeclaredMethod("incUsedResource", Resource.class);
+      incUsedResourceMethod.setAccessible(true);
+
+      Map<String, Long> customResources =
+          CustomResourceTypesConfigurationProvider.getCustomResourceTypes()
+              .stream()
+              .collect(Collectors.toMap(Function.identity(), v -> value));
+
+      incUsedResourceMethod.invoke(queue,
+          Resource.newInstance(20, 30, customResources));
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+}
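
For reference, a minimal sketch (not part of the patch) of how a Resource carrying a custom
resource type can be built directly, mirroring the reflection-based helper above; the resource
name and the numeric values are illustrative only:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.Resource;

// Illustrative values: 20 MB memory, 30 vcores and one custom resource set to 33.
Map<String, Long> customResources = new HashMap<>();
customResources.put("custom-resource-1", 33L);
Resource resource = Resource.newInstance(20, 30, customResources);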

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
new file mode 100644
index 0000000..4ab1443
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringEqual;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringMatch;
+import static org.junit.Assert.*;
+
+/**
+ * Contains all value verifications that are needed to verify {@link AppInfo}
+ * JSON objects.
+ */
+public final class AppInfoJsonVerifications {
+
+  private AppInfoJsonVerifications() {
+    //utility class
+  }
+
+  /**
+   * Tests whether the {@link AppInfo} representation object contains the
+   * required values as defined in the specified app parameter.
+   * @param  info the JSON object that holds the {@link AppInfo} representation.
+   * @param  app  an RMApp instance that contains the required values
+   *              to test against.
+   */
+  public static void verify(JSONObject info, RMApp app) throws JSONException {
+    checkStringMatch("id", app.getApplicationId().toString(),
+        info.getString("id"));
+    checkStringMatch("user", app.getUser(), info.getString("user"));
+    checkStringMatch("name", app.getName(), info.getString("name"));
+    checkStringMatch("applicationType", app.getApplicationType(),
+        info.getString("applicationType"));
+    checkStringMatch("queue", app.getQueue(), info.getString("queue"));
+    assertEquals("priority doesn't match", 0, info.getInt("priority"));
+    checkStringMatch("state", app.getState().toString(),
+        info.getString("state"));
+    checkStringMatch("finalStatus", app.getFinalApplicationStatus().toString(),
+        info.getString("finalStatus"));
+    assertEquals("progress doesn't match", 0,
+        (float) info.getDouble("progress"), 0.0);
+    if ("UNASSIGNED".equals(info.getString("trackingUI"))) {
+      checkStringMatch("trackingUI", "UNASSIGNED",
+          info.getString("trackingUI"));
+    }
+    checkStringEqual("diagnostics", app.getDiagnostics().toString(),
+        info.getString("diagnostics"));
+    assertEquals("clusterId doesn't match",
+        ResourceManager.getClusterTimeStamp(), info.getLong("clusterId"));
+    assertEquals("startedTime doesn't match", app.getStartTime(),
+        info.getLong("startedTime"));
+    assertEquals("finishedTime doesn't match", app.getFinishTime(),
+        info.getLong("finishedTime"));
+    assertTrue("elapsed time not greater than 0",
+        info.getLong("elapsedTime") > 0);
+    checkStringMatch("amHostHttpAddress",
+        app.getCurrentAppAttempt().getMasterContainer().getNodeHttpAddress(),
+        info.getString("amHostHttpAddress"));
+    assertTrue("amContainerLogs doesn't match",
+        info.getString("amContainerLogs").startsWith("http://"));
+    assertTrue("amContainerLogs doesn't contain user info",
+        info.getString("amContainerLogs").endsWith("/" + app.getUser()));
+    assertEquals("allocatedMB doesn't match", 1024, info.getInt("allocatedMB"));
+    assertEquals("allocatedVCores doesn't match", 1,
+        info.getInt("allocatedVCores"));
+    assertEquals("queueUsagePerc doesn't match", 50.0f,
+        (float) info.getDouble("queueUsagePercentage"), 0.01f);
+    assertEquals("clusterUsagePerc doesn't match", 50.0f,
+        (float) info.getDouble("clusterUsagePercentage"), 0.01f);
+    assertEquals("numContainers doesn't match", 1,
+        info.getInt("runningContainers"));
+    assertNotNull("preemptedResourceSecondsMap should not be null",
+        info.getJSONObject("preemptedResourceSecondsMap"));
+    assertEquals("preemptedResourceMB doesn't match",
+        app.getRMAppMetrics().getResourcePreempted().getMemorySize(),
+        info.getInt("preemptedResourceMB"));
+    assertEquals("preemptedResourceVCores doesn't match",
+        app.getRMAppMetrics().getResourcePreempted().getVirtualCores(),
+        info.getInt("preemptedResourceVCores"));
+    assertEquals("numNonAMContainerPreempted doesn't match",
+        app.getRMAppMetrics().getNumNonAMContainersPreempted(),
+        info.getInt("numNonAMContainerPreempted"));
+    assertEquals("numAMContainerPreempted doesn't match",
+        app.getRMAppMetrics().getNumAMContainersPreempted(),
+        info.getInt("numAMContainerPreempted"));
+    assertEquals("Log aggregation Status doesn't match",
+        app.getLogAggregationStatusForAppReport().toString(),
+        info.getString("logAggregationStatus"));
+    assertEquals("unmanagedApplication doesn't match",
+        app.getApplicationSubmissionContext().getUnmanagedAM(),
+        info.getBoolean("unmanagedApplication"));
+
+    if (app.getApplicationSubmissionContext()
+        .getNodeLabelExpression() != null) {
+      assertEquals("appNodeLabelExpression doesn't match",
+          app.getApplicationSubmissionContext().getNodeLabelExpression(),
+          info.getString("appNodeLabelExpression"));
+    }
+    assertEquals("amNodeLabelExpression doesn't match",
+        app.getAMResourceRequests().get(0).getNodeLabelExpression(),
+        info.getString("amNodeLabelExpression"));
+    assertEquals("amRPCAddress",
+        AppInfo.getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()),
+        info.getString("amRPCAddress"));
+  }
+}
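
A hedged usage sketch, not taken from the patch: invoking this helper from a test, assuming
`response` is a BufferedClientResponse wrapping a GET on ws/v1/cluster/apps/{appid} and `app`
is the RMApp the test submitted earlier:

// Hypothetical call site; the "app" key follows the RM web services JSON layout.
JSONObject json = response.getEntity(JSONObject.class);
JSONObject appInfo = json.getJSONObject("app");
AppInfoJsonVerifications.verify(appInfo, app);   // declares throws JSONException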




[32/50] [abbrv] hadoop git commit: HDFS-13674. Improve documentation on Metrics. Contributed by Chao Sun.

Posted by bh...@apache.org.
HDFS-13674. Improve documentation on Metrics. Contributed by Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a68ac60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a68ac60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a68ac60

Branch: refs/heads/HDDS-48
Commit: 7a68ac607c52c8a28dcd75a367ae77331787a3b4
Parents: 790c563
Author: Yiqun Lin <yq...@apache.org>
Authored: Mon Jul 9 14:27:34 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Mon Jul 9 14:27:34 2018 +0800

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/Metrics.md  | 39 ++++++++++++--------
 1 file changed, 24 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a68ac60/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 676ab0b..2c7bd4d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -159,14 +159,17 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
 | `TransactionsAvgTime` | Average time of Journal transactions in milliseconds |
 | `SyncsNumOps` | Total number of Journal syncs |
 | `SyncsAvgTime` | Average time of Journal syncs in milliseconds |
+| `SyncsTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of Journal sync time in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `TransactionsBatchedInSync` | Total number of Journal transactions batched in sync |
+| `TransactionsBatchedInSync`*num*`s(50/75/90/95/99)thPercentileCount` | The 50/75/90/95/99th percentile of number of batched Journal transactions (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `StorageBlockReportNumOps` | Total number of processing block reports from individual storages in DataNode |
 | `StorageBlockReportAvgTime` | Average time of processing block reports in milliseconds |
+| `StorageBlockReport`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of block report processing time in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `CacheReportNumOps` | Total number of processing cache reports from DataNode |
 | `CacheReportAvgTime` | Average time of processing cache reports in milliseconds |
+| `CacheReport`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of cached report processing time in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `SafeModeTime` | The interval between FSNameSystem starts and the last time safemode leaves in milliseconds.  (sometimes not equal to the time in SafeMode, see [HDFS-5156](https://issues.apache.org/jira/browse/HDFS-5156)) |
 | `FsImageLoadTime` | Time loading FS Image at startup in milliseconds |
-| `FsImageLoadTime` | Time loading FS Image at startup in milliseconds |
 | `GetEditNumOps` | Total number of edits downloads from SecondaryNameNode |
 | `GetEditAvgTime` | Average edits download time in milliseconds |
 | `GetImageNumOps` | Total number of fsimage downloads from SecondaryNameNode |
@@ -177,22 +180,23 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
 | `NNStartedTimeInMillis`| NameNode start time in milliseconds |
 | `GenerateEDEKTimeNumOps` | Total number of generating EDEK |
 | `GenerateEDEKTimeAvgTime` | Average time of generating EDEK in milliseconds |
+| `GenerateEDEKTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in generating EDEK in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `WarmUpEDEKTimeNumOps` | Total number of warming up EDEK |
 | `WarmUpEDEKTimeAvgTime` | Average time of warming up EDEK in milliseconds |
-| `ResourceCheckTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of NameNode resource check latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
-| `StorageBlockReport`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of storage block report latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `WarmUpEDEKTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in warming up EDEK in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `ResourceCheckTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of NameNode resource check latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `EditLogTailTimeNumOps` | Total number of times the standby NameNode tailed the edit log |
 | `EditLogTailTimeAvgTime` | Average time (in milliseconds) spent by standby NameNode in tailing edit log |
-| `EditLogTailTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in tailing edit logs by standby NameNode, in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `EditLogTailTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in tailing edit logs by standby NameNode in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `EditLogFetchTimeNumOps` | Total number of times the standby NameNode fetched remote edit streams from journal nodes |
 | `EditLogFetchTimeAvgTime` | Average time (in milliseconds) spent by standby NameNode in fetching remote edit streams from journal nodes |
-| `EditLogFetchTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in fetching edit streams from journal nodes by standby NameNode, in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `EditLogFetchTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in fetching edit streams from journal nodes by standby NameNode in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `NumEditLogLoadedNumOps` | Total number of times edits were loaded by standby NameNode |
 | `NumEditLogLoadedAvgCount` | Average number of edits loaded by standby NameNode in each edit log tailing |
-| `NumEditLogLoaded`*num*`s(50/75/90/95/99)thPercentileCount` | The 50/75/90/95/99th percentile of number of edits loaded by standby NameNode in each edit log tailing. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `NumEditLogLoaded`*num*`s(50/75/90/95/99)thPercentileCount` | The 50/75/90/95/99th percentile of number of edits loaded by standby NameNode in each edit log tailing (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `EditLogTailIntervalNumOps` | Total number of intervals between edit log tailings by standby NameNode |
 | `EditLogTailIntervalAvgTime` | Average time of intervals between edit log tailings by standby NameNode in milliseconds |
-| `EditLogTailInterval`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time between edit log tailings by standby NameNode, in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `EditLogTailInterval`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time between edit log tailings by standby NameNode in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 
 FSNamesystem
 ------------
@@ -338,13 +342,13 @@ Each metrics record contains tags such as SessionId and Hostname as additional i
 | `RamDiskBlocksEvictedWithoutRead` | Total number of blocks evicted in memory without ever being read from memory |
 | `RamDiskBlocksEvictionWindowMsNumOps` | Number of blocks evicted in memory|
 | `RamDiskBlocksEvictionWindowMsAvgTime` | Average time of blocks in memory before being evicted in milliseconds |
-| `RamDiskBlocksEvictionWindows`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and eviction in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `RamDiskBlocksEvictionWindows`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and eviction in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `RamDiskBlocksLazyPersisted` | Total number of blocks written to disk by lazy writer |
 | `RamDiskBlocksDeletedBeforeLazyPersisted` | Total number of blocks deleted by application before being persisted to disk |
 | `RamDiskBytesLazyPersisted` | Total number of bytes written to disk by lazy writer |
 | `RamDiskBlocksLazyPersistWindowMsNumOps` | Number of blocks written to disk by lazy writer |
 | `RamDiskBlocksLazyPersistWindowMsAvgTime` | Average time of blocks written to disk by lazy writer in milliseconds |
-| `RamDiskBlocksLazyPersistWindows`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and disk persist in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `RamDiskBlocksLazyPersistWindows`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and disk persist in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `FsyncCount` | Total number of fsync |
 | `VolumeFailures` | Total number of volume failures occurred |
 | `ReadBlockOpNumOps` | Total number of read operations |
@@ -371,14 +375,19 @@ Each metrics record contains tags such as SessionId and Hostname as additional i
 | `CacheReportsAvgTime` | Average time of cache report operations in milliseconds |
 | `PacketAckRoundTripTimeNanosNumOps` | Total number of ack round trip |
 | `PacketAckRoundTripTimeNanosAvgTime` | Average time from ack send to receive minus the downstream ack time in nanoseconds |
+| `PacketAckRoundTripTimeNanos`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile latency from ack send to receive minus the downstream ack time in nanoseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `FlushNanosNumOps` | Total number of flushes |
 | `FlushNanosAvgTime` | Average flush time in nanoseconds |
+| `FlushNanos`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile flush time in nanoseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `FsyncNanosNumOps` | Total number of fsync |
 | `FsyncNanosAvgTime` | Average fsync time in nanoseconds |
+| `FsyncNanos`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile fsync time in nanoseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `SendDataPacketBlockedOnNetworkNanosNumOps` | Total number of sending packets |
 | `SendDataPacketBlockedOnNetworkNanosAvgTime` | Average waiting time of sending packets in nanoseconds |
+| `SendDataPacketBlockedOnNetworkNanos`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile waiting time of sending packets in nanoseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `SendDataPacketTransferNanosNumOps` | Total number of sending packets |
 | `SendDataPacketTransferNanosAvgTime` | Average transfer time of sending packets in nanoseconds |
+| `SendDataPacketTransferNanos`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile transfer time of sending packets in nanoseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `TotalWriteTime`| Total number of milliseconds spent on write operation |
 | `TotalReadTime` | Total number of milliseconds spent on read operation |
 | `RemoteBytesRead` | Number of bytes read by remote clients |
@@ -410,23 +419,23 @@ contains tags such as Hostname as additional information along with metrics.
 | `TotalMetadataOperations` | Total number (monotonically increasing) of metadata operations. Metadata operations include stat, list, mkdir, delete, move, open and posix_fadvise. |
 | `MetadataOperationRateNumOps` | The number of metadata operations within an interval time of metric |
 | `MetadataOperationRateAvgTime` | Mean time of metadata operations in milliseconds |
-| `MetadataOperationLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of metadata operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `MetadataOperationLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of metadata operations latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `TotalDataFileIos` | Total number (monotonically increasing) of data file io operations |
 | `DataFileIoRateNumOps` | The number of data file io operations within an interval time of metric |
 | `DataFileIoRateAvgTime` | Mean time of data file io operations in milliseconds |
-| `DataFileIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of data file io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `DataFileIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of data file io operations latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `FlushIoRateNumOps` | The number of file flush io operations within an interval time of metric |
 | `FlushIoRateAvgTime` | Mean time of file flush io operations in milliseconds |
-| `FlushIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file flush io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `FlushIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file flush io operations latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `SyncIoRateNumOps` | The number of file sync io operations within an interval time of metric |
 | `SyncIoRateAvgTime` | Mean time of file sync io operations in milliseconds |
-| `SyncIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file sync io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `SyncIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file sync io operations latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `ReadIoRateNumOps` | The number of file read io operations within an interval time of metric |
 | `ReadIoRateAvgTime` | Mean time of file read io operations in milliseconds |
-| `ReadIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file read io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `ReadIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file read io operations latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `WriteIoRateNumOps` | The number of file write io operations within an interval time of metric |
 | `WriteIoRateAvgTime` | Mean time of file write io operations in milliseconds |
-| `WriteIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file write io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `WriteIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file write io operations latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `TotalFileIoErrors` | Total number (monotonically increasing) of file io error operations |
 | `FileIoErrorRateNumOps` | The number of file io error operations within an interval time of metric |
 | `FileIoErrorRateAvgTime` | It measures the mean time in milliseconds from the start of an operation to hitting a failure |


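As the table notes repeatedly, these percentile metrics stay off until rollover intervals are
configured. A minimal sketch of enabling them programmatically; the interval values below are
illustrative, not defaults:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Illustrative values: collect percentiles over 60-second and 300-second windows.
Configuration conf = new HdfsConfiguration();
conf.set("dfs.metrics.percentiles.intervals", "60,300");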


[37/50] [abbrv] hadoop git commit: HADOOP-15591. KMSClientProvider should log KMS DT acquisition at INFO level. Contributed by Kitti Nanasi.

Posted by bh...@apache.org.
HADOOP-15591. KMSClientProvider should log KMS DT acquisition at INFO level. Contributed by Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/def9d94a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/def9d94a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/def9d94a

Branch: refs/heads/HDDS-48
Commit: def9d94a40e1ff71a0dc5a4db1f042e2704cb84d
Parents: 83cd84b
Author: Xiao Chen <xi...@apache.org>
Authored: Mon Jul 9 12:00:32 2018 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Mon Jul 9 12:01:52 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/def9d94a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 7b46075..11815da 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -1036,13 +1036,13 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
           public Token<?> run() throws Exception {
             // Not using the cached token here.. Creating a new token here
             // everytime.
-            LOG.debug("Getting new token from {}, renewer:{}", url, renewer);
+            LOG.info("Getting new token from {}, renewer:{}", url, renewer);
             return authUrl.getDelegationToken(url,
                 new DelegationTokenAuthenticatedURL.Token(), renewer, doAsUser);
           }
         });
         if (token != null) {
-          LOG.debug("New token received: ({})", token);
+          LOG.info("New token received: ({})", token);
           credentials.addToken(token.getService(), token);
           tokens = new Token<?>[] { token };
         } else {




[02/50] [abbrv] hadoop git commit: YARN-7451. Add missing tests to verify the presence of custom resources of RM apps and scheduler webservice endpoints (snemeth via rkanter)

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
new file mode 100644
index 0000000..7c5b6db
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
+import org.w3c.dom.Element;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringMatch;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlBoolean;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlFloat;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlInt;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Contains all value verifications that are needed to verify {@link AppInfo}
+ * XML documents.
+ */
+public final class AppInfoXmlVerifications {
+
+  private AppInfoXmlVerifications() {
+    //utility class
+  }
+
+  /**
+   * Tests whether the {@link AppInfo} representation object contains the
+   * required values as defined in the specified app parameter.
+   * @param  info the XML Element that holds the {@link AppInfo} representation.
+   * @param  app  an RMApp instance that contains the required values
+   */
+  public static void verify(Element info, RMApp app) {
+    checkStringMatch("id", app.getApplicationId()
+            .toString(), getXmlString(info, "id"));
+    checkStringMatch("user", app.getUser(),
+            getXmlString(info, "user"));
+    checkStringMatch("name", app.getName(),
+            getXmlString(info, "name"));
+    checkStringMatch("applicationType",
+            app.getApplicationType(), getXmlString(info, "applicationType"));
+    checkStringMatch("queue", app.getQueue(),
+            getXmlString(info, "queue"));
+    assertEquals("priority doesn't match", 0, getXmlInt(info, "priority"));
+    checkStringMatch("state", app.getState().toString(),
+            getXmlString(info, "state"));
+    checkStringMatch("finalStatus", app
+            .getFinalApplicationStatus().toString(),
+            getXmlString(info, "finalStatus"));
+    assertEquals("progress doesn't match", 0, getXmlFloat(info, "progress"),
+        0.0);
+    if ("UNASSIGNED".equals(getXmlString(info, "trackingUI"))) {
+      checkStringMatch("trackingUI", "UNASSIGNED",
+              getXmlString(info, "trackingUI"));
+    }
+    WebServicesTestUtils.checkStringEqual("diagnostics",
+            app.getDiagnostics().toString(), getXmlString(info, "diagnostics"));
+    assertEquals("clusterId doesn't match",
+            ResourceManager.getClusterTimeStamp(),
+            getXmlLong(info, "clusterId"));
+    assertEquals("startedTime doesn't match", app.getStartTime(),
+            getXmlLong(info, "startedTime"));
+    assertEquals("finishedTime doesn't match", app.getFinishTime(),
+            getXmlLong(info, "finishedTime"));
+    assertTrue("elapsed time not greater than 0",
+            getXmlLong(info, "elapsedTime") > 0);
+    checkStringMatch("amHostHttpAddress", app
+                    .getCurrentAppAttempt().getMasterContainer()
+                    .getNodeHttpAddress(),
+            getXmlString(info, "amHostHttpAddress"));
+    assertTrue("amContainerLogs doesn't match",
+        getXmlString(info, "amContainerLogs").startsWith("http://"));
+    assertTrue("amContainerLogs doesn't contain user info",
+        getXmlString(info, "amContainerLogs").endsWith("/" + app.getUser()));
+    assertEquals("allocatedMB doesn't match", 1024,
+            getXmlInt(info, "allocatedMB"));
+    assertEquals("allocatedVCores doesn't match", 1,
+            getXmlInt(info, "allocatedVCores"));
+    assertEquals("queueUsagePerc doesn't match", 50.0f,
+            getXmlFloat(info, "queueUsagePercentage"), 0.01f);
+    assertEquals("clusterUsagePerc doesn't match", 50.0f,
+            getXmlFloat(info, "clusterUsagePercentage"), 0.01f);
+    assertEquals("numContainers doesn't match", 1,
+        getXmlInt(info, "runningContainers"));
+    assertNotNull("preemptedResourceSecondsMap should not be null",
+            info.getElementsByTagName("preemptedResourceSecondsMap"));
+    assertEquals("preemptedResourceMB doesn't match", app
+                    .getRMAppMetrics().getResourcePreempted().getMemorySize(),
+            getXmlInt(info, "preemptedResourceMB"));
+    assertEquals("preemptedResourceVCores doesn't match", app
+                    .getRMAppMetrics().getResourcePreempted().getVirtualCores(),
+            getXmlInt(info, "preemptedResourceVCores"));
+    assertEquals("numNonAMContainerPreempted doesn't match", app
+                    .getRMAppMetrics().getNumNonAMContainersPreempted(),
+            getXmlInt(info, "numNonAMContainerPreempted"));
+    assertEquals("numAMContainerPreempted doesn't match", app
+                    .getRMAppMetrics().getNumAMContainersPreempted(),
+            getXmlInt(info, "numAMContainerPreempted"));
+    assertEquals("Log aggregation Status doesn't match", app
+                    .getLogAggregationStatusForAppReport().toString(),
+            getXmlString(info, "logAggregationStatus"));
+    assertEquals("unmanagedApplication doesn't match", app
+                    .getApplicationSubmissionContext().getUnmanagedAM(),
+            getXmlBoolean(info, "unmanagedApplication"));
+    assertEquals("appNodeLabelExpression doesn't match",
+            app.getApplicationSubmissionContext().getNodeLabelExpression(),
+            getXmlString(info, "appNodeLabelExpression"));
+    assertEquals("amNodeLabelExpression doesn't match",
+            app.getAMResourceRequests().get(0).getNodeLabelExpression(),
+            getXmlString(info, "amNodeLabelExpression"));
+    assertEquals("amRPCAddress",
+            AppInfo.getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()),
+            getXmlString(info, "amRPCAddress"));
+  }
+}
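
Analogously to the JSON helper, a hypothetical call site, assuming `dom` is the parsed
Document of a ws/v1/cluster/apps/{appid} XML response and `app` is the submitted RMApp:

// Illustrative only; real tests obtain the Element from their own parsed response.
Element appInfo = (Element) dom.getElementsByTagName("app").item(0);
AppInfoXmlVerifications.verify(appInfo, app);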

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
new file mode 100644
index 0000000..a8990ca
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+
+import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.UniformInterfaceException;
+
+import javax.ws.rs.core.MediaType;
+import java.io.IOException;
+
+/**
+ * This class is merely a wrapper for {@link ClientResponse}. Given that the
+ * entity input stream of {@link ClientResponse} can be read only once by
+ * default and for some tests it is convenient to read the input stream many
+ * times, this class hides the details of how to do that and prevents
+ * unnecessary code duplication in tests.
+ */
+public class BufferedClientResponse {
+  private ClientResponse response;
+
+  public BufferedClientResponse(ClientResponse response) {
+    response.bufferEntity();
+    this.response = response;
+  }
+
+  public <T> T getEntity(Class<T> clazz)
+          throws ClientHandlerException, UniformInterfaceException {
+    try {
+      response.getEntityInputStream().reset();
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+    return response.getEntity(clazz);
+  }
+
+  public MediaType getType() {
+    return response.getType();
+  }
+}
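
A hedged sketch of the intended usage, assuming `response` is a Jersey ClientResponse obtained
by a test: the same entity can be consumed more than once, which a plain ClientResponse would
not allow without buffering.

import com.sun.jersey.api.client.ClientResponse;
import org.codehaus.jettison.json.JSONObject;

BufferedClientResponse buffered = new BufferedClientResponse(response);
String rawBody = buffered.getEntity(String.class);       // first read, e.g. for logging
JSONObject json = buffered.getEntity(JSONObject.class);  // second read of the same entity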

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
new file mode 100644
index 0000000..9d6a111
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.sun.jersey.api.client.WebResource;
+import org.apache.hadoop.http.JettyUtils;
+import org.codehaus.jettison.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.core.MediaType;
+
+import java.util.function.Consumer;
+
+import static org.junit.Assert.*;
+
+/**
+ * This class hides the implementation details of how to verify the structure of
+ * JSON responses. Tests should only provide the path of the
+ * {@link WebResource}, the response from the resource and
+ * the verifier Consumer to
+ * {@link JsonCustomResourceTypeTestcase#verify(Consumer)}. An instance of
+ * {@link JSONObject} will be passed to that consumer to be able to
+ * verify the response.
+ */
+public class JsonCustomResourceTypeTestcase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JsonCustomResourceTypeTestcase.class);
+
+  private final WebResource path;
+  private final BufferedClientResponse response;
+  private final JSONObject parsedResponse;
+
+  public JsonCustomResourceTypeTestcase(WebResource path,
+                                        BufferedClientResponse response) {
+    this.path = path;
+    this.response = response;
+    this.parsedResponse = response.getEntity(JSONObject.class);
+  }
+
+  public void verify(Consumer<JSONObject> verifier) {
+    assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
+        response.getType().toString());
+
+    logResponse();
+
+    String responseStr = response.getEntity(String.class);
+    if (responseStr == null || responseStr.isEmpty()) {
+      throw new IllegalStateException("Response is null or empty!");
+    }
+    verifier.accept(parsedResponse);
+  }
+
+  private void logResponse() {
+    String responseStr = response.getEntity(String.class);
+    LOG.info("Raw response from service URL {}: {}", path.toString(),
+        responseStr);
+    LOG.info("Parsed response from service URL {}: {}", path.toString(),
+        parsedResponse);
+  }
+}
\ No newline at end of file
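
A hedged sketch of how a test wires the pieces together, assuming it runs inside a
JerseyTestBase subclass (so that resource() is available), mirroring the callers shown
earlier in this patch:

WebResource path =
    resource().path("ws").path("v1").path("cluster").path("scheduler");
ClientResponse response =
    path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);

new JsonCustomResourceTypeTestcase(path, new BufferedClientResponse(response))
    .verify(json -> {
      // Illustrative assertion only; real tests walk deeper into the tree.
      assertTrue("scheduler element missing", json.has("scheduler"));
    });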

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
new file mode 100644
index 0000000..6e58a89
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
@@ -0,0 +1,252 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+
+import java.util.List;
+import java.util.Map;
+
+import static junit.framework.TestCase.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Performs value verifications on
+ * {@link org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceRequestInfo}
+ * objects against the values of {@link ResourceRequest}. With the help of the
+ * {@link Builder}, users can also make verifications of the custom resource
+ * types and their values.
+ */
+public class ResourceRequestsJsonVerifications {
+  private final ResourceRequest resourceRequest;
+  private final JSONObject requestInfo;
+  private final Map<String, Long> customResourceTypes;
+  private final List<String> expectedCustomResourceTypes;
+
+  ResourceRequestsJsonVerifications(Builder builder) {
+    this.resourceRequest = builder.resourceRequest;
+    this.requestInfo = builder.requestInfo;
+    this.customResourceTypes = builder.customResourceTypes;
+    this.expectedCustomResourceTypes = builder.expectedCustomResourceTypes;
+  }
+
+  public static void verify(JSONObject requestInfo, ResourceRequest rr)
+      throws JSONException {
+    createDefaultBuilder(requestInfo, rr).build().verify();
+  }
+
+  public static void verifyWithCustomResourceTypes(JSONObject requestInfo,
+      ResourceRequest resourceRequest, List<String> expectedResourceTypes)
+      throws JSONException {
+
+    createDefaultBuilder(requestInfo, resourceRequest)
+        .withExpectedCustomResourceTypes(expectedResourceTypes)
+        .withCustomResourceTypes(
+            extractActualCustomResourceTypes(requestInfo, expectedResourceTypes))
+        .build().verify();
+  }
+
+  private static Builder createDefaultBuilder(JSONObject requestInfo,
+      ResourceRequest resourceRequest) {
+    return new ResourceRequestsJsonVerifications.Builder()
+            .withRequest(resourceRequest)
+            .withRequestInfoJson(requestInfo);
+  }
+
+  private static Map<String, Long> extractActualCustomResourceTypes(
+      JSONObject requestInfo, List<String> expectedResourceTypes)
+      throws JSONException {
+    JSONObject capability = requestInfo.getJSONObject("capability");
+    Map<String, Long> resourceAndValue =
+        extractCustomResourceTypeValues(capability, expectedResourceTypes);
+    Map.Entry<String, Long> resourceEntry =
+        resourceAndValue.entrySet().iterator().next();
+
+    assertTrue(
+        "Found resource type: " + resourceEntry.getKey()
+            + " is not in expected resource types: " + expectedResourceTypes,
+        expectedResourceTypes.contains(resourceEntry.getKey()));
+
+    return resourceAndValue;
+  }
+
+  private static Map<String, Long> extractCustomResourceTypeValues(
+      JSONObject capability, List<String> expectedResourceTypes)
+      throws JSONException {
+    assertTrue(
+        "resourceCategory does not have resourceInformations: " + capability,
+        capability.has("resourceInformations"));
+
+    JSONObject resourceInformations =
+        capability.getJSONObject("resourceInformations");
+    assertTrue(
+        "resourceInformations does not have resourceInformation object: "
+            + resourceInformations,
+        resourceInformations.has("resourceInformation"));
+    JSONArray customResources =
+        resourceInformations.getJSONArray("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        expectedResourceTypes.size(), customResources.length() - 2);
+
+    Map<String, Long> resourceValues = Maps.newHashMap();
+    for (int i = 0; i < customResources.length(); i++) {
+      JSONObject customResource = customResources.getJSONObject(i);
+      assertTrue("Resource type does not have name field: " + customResource,
+          customResource.has("name"));
+      assertTrue("Resource type does not have resourceType field: "
+          + customResource, customResource.has("resourceType"));
+      assertTrue(
+          "Resource type does not have units field: " + customResource,
+          customResource.has("units"));
+      assertTrue(
+          "Resource type does not have value field: " + customResource,
+          customResource.has("value"));
+
+      String name = customResource.getString("name");
+      String unit = customResource.getString("units");
+      String resourceType = customResource.getString("resourceType");
+      Long value = customResource.getLong("value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          expectedResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Custom resource value " + value + " is null!", value);
+      resourceValues.put(name, value);
+    }
+
+    return resourceValues;
+  }
+
+  private void verify() throws JSONException {
+    assertEquals("nodeLabelExpression doesn't match",
+        resourceRequest.getNodeLabelExpression(),
+        requestInfo.getString("nodeLabelExpression"));
+    assertEquals("numContainers doesn't match",
+        resourceRequest.getNumContainers(),
+        requestInfo.getInt("numContainers"));
+    assertEquals("relaxLocality doesn't match",
+        resourceRequest.getRelaxLocality(),
+        requestInfo.getBoolean("relaxLocality"));
+    assertEquals("priority does not match",
+        resourceRequest.getPriority().getPriority(),
+        requestInfo.getInt("priority"));
+    assertEquals("resourceName does not match",
+        resourceRequest.getResourceName(),
+        requestInfo.getString("resourceName"));
+    assertEquals("memory does not match",
+        resourceRequest.getCapability().getMemorySize(),
+        requestInfo.getJSONObject("capability").getLong("memory"));
+    assertEquals("vCores does not match",
+        resourceRequest.getCapability().getVirtualCores(),
+        requestInfo.getJSONObject("capability").getLong("vCores"));
+
+    verifyAtLeastOneCustomResourceIsSerialized();
+
+    JSONObject executionTypeRequest =
+        requestInfo.getJSONObject("executionTypeRequest");
+    assertEquals("executionType does not match",
+        resourceRequest.getExecutionTypeRequest().getExecutionType().name(),
+        executionTypeRequest.getString("executionType"));
+    assertEquals("enforceExecutionType does not match",
+        resourceRequest.getExecutionTypeRequest().getEnforceExecutionType(),
+        executionTypeRequest.getBoolean("enforceExecutionType"));
+  }
+
+  /**
+   * JSON serialization produces "invalid JSON" by default as maps are
+   * serialized like this:
+   * "customResources":{"entry":{"key":"customResource-1","value":"0"}}
+ * If the map has multiple keys, then multiple entries are serialized,
+ * producing duplicate "entry" keys. Our JSON parser in the tests cannot
+ * handle such duplicates, therefore only one custom resource will be
+ * present in the parsed JSON. See:
+   * https://issues.apache.org/jira/browse/YARN-7505
+   */
+  private void verifyAtLeastOneCustomResourceIsSerialized() {
+    boolean resourceFound = false;
+    for (String expectedCustomResourceType : expectedCustomResourceTypes) {
+      if (customResourceTypes.containsKey(expectedCustomResourceType)) {
+        resourceFound = true;
+        Long resourceValue =
+            customResourceTypes.get(expectedCustomResourceType);
+        assertNotNull("Resource value should not be null!", resourceValue);
+      }
+    }
+    assertTrue("No custom resource type can be found in the response!",
+        resourceFound);
+  }
+
+  /**
+   * Builder class for {@link ResourceRequestsJsonVerifications}.
+   */
+  public static final class Builder {
+    private List<String> expectedCustomResourceTypes = Lists.newArrayList();
+    private Map<String, Long> customResourceTypes;
+    private ResourceRequest resourceRequest;
+    private JSONObject requestInfo;
+
+    Builder() {
+    }
+
+    public static Builder create() {
+      return new Builder();
+    }
+
+    Builder withExpectedCustomResourceTypes(
+            List<String> expectedCustomResourceTypes) {
+      this.expectedCustomResourceTypes = expectedCustomResourceTypes;
+      return this;
+    }
+
+    Builder withCustomResourceTypes(
+            Map<String, Long> customResourceTypes) {
+      this.customResourceTypes = customResourceTypes;
+      return this;
+    }
+
+    Builder withRequest(ResourceRequest resourceRequest) {
+      this.resourceRequest = resourceRequest;
+      return this;
+    }
+
+    Builder withRequestInfoJson(JSONObject requestInfo) {
+      this.requestInfo = requestInfo;
+      return this;
+    }
+
+    public ResourceRequestsJsonVerifications build() {
+      return new ResourceRequestsJsonVerifications(this);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
new file mode 100644
index 0000000..af9b0f3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static junit.framework.TestCase.assertTrue;
+import static org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase.toXml;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlBoolean;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlInt;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Performs value verifications on
+ * {@link org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceRequestInfo}
+ * objects against the values of {@link ResourceRequest}. With the help of the
+ * {@link Builder}, users can also verify custom resource types and their
+ * values.
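+ * <p>
+ * Illustrative usage (the {@code requestInfo} element and {@code request}
+ * below are placeholders for the resource request XML element and the
+ * resource request a test already has):
+ * <pre>{@code
+ *   ResourceRequestsXmlVerifications.verifyWithCustomResourceTypes(
+ *       requestInfo, request, Arrays.asList("customResource-1"));
+ * }</pre>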
+ */
+public class ResourceRequestsXmlVerifications {
+  private final ResourceRequest resourceRequest;
+  private final Element requestInfo;
+  private final Map<String, Long> customResourceTypes;
+  private final List<String> expectedCustomResourceTypes;
+
+  ResourceRequestsXmlVerifications(Builder builder) {
+    this.resourceRequest = builder.resourceRequest;
+    this.requestInfo = builder.requestInfo;
+    this.customResourceTypes = builder.customResourceTypes;
+    this.expectedCustomResourceTypes = builder.expectedCustomResourceTypes;
+  }
+
+  public static void verifyWithCustomResourceTypes(Element requestInfo,
+      ResourceRequest resourceRequest, List<String> expectedResourceTypes) {
+
+    createDefaultBuilder(requestInfo, resourceRequest)
+        .withExpectedCustomResourceTypes(expectedResourceTypes)
+        .withCustomResourceTypes(extractActualCustomResourceType(requestInfo,
+            expectedResourceTypes))
+        .build().verify();
+  }
+
+  private static Builder createDefaultBuilder(Element requestInfo,
+      ResourceRequest resourceRequest) {
+    return new ResourceRequestsXmlVerifications.Builder()
+        .withRequest(resourceRequest).withRequestInfo(requestInfo);
+  }
+
+  private static Map<String, Long> extractActualCustomResourceType(
+      Element requestInfo, List<String> expectedResourceTypes) {
+    Element capability =
+        (Element) requestInfo.getElementsByTagName("capability").item(0);
+
+    return extractCustomResourceTypes(capability,
+        Sets.newHashSet(expectedResourceTypes));
+  }
+
+  private static Map<String, Long> extractCustomResourceTypes(Element capability,
+      Set<String> expectedResourceTypes) {
+    assertEquals(
+        toXml(capability) + " should have only one resourceInformations child!",
+        1, capability.getElementsByTagName("resourceInformations").getLength());
+    Element resourceInformations = (Element) capability
+        .getElementsByTagName("resourceInformations").item(0);
+
+    NodeList customResources =
+        resourceInformations.getElementsByTagName("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        expectedResourceTypes.size(), customResources.getLength() - 2);
+
+    Map<String, Long> resourceTypesAndValues = Maps.newHashMap();
+    for (int i = 0; i < customResources.getLength(); i++) {
+      Element customResource = (Element) customResources.item(i);
+      String name = getXmlString(customResource, "name");
+      String unit = getXmlString(customResource, "units");
+      String resourceType = getXmlString(customResource, "resourceType");
+      Long value = getXmlLong(customResource, "value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          expectedResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Resource value should not be null for resource type "
+          + resourceType + ", listing xml contents: " + toXml(customResource),
+          value);
+      resourceTypesAndValues.put(name, value);
+    }
+
+    return resourceTypesAndValues;
+  }
+
+  private void verify() {
+    assertEquals("nodeLabelExpression doesn't match",
+        resourceRequest.getNodeLabelExpression(),
+        getXmlString(requestInfo, "nodeLabelExpression"));
+    assertEquals("numContainers doesn't match",
+        resourceRequest.getNumContainers(),
+        getXmlInt(requestInfo, "numContainers"));
+    assertEquals("relaxLocality doesn't match",
+        resourceRequest.getRelaxLocality(),
+        getXmlBoolean(requestInfo, "relaxLocality"));
+    assertEquals("priority does not match",
+        resourceRequest.getPriority().getPriority(),
+        getXmlInt(requestInfo, "priority"));
+    assertEquals("resourceName does not match",
+        resourceRequest.getResourceName(),
+        getXmlString(requestInfo, "resourceName"));
+    Element capability = (Element) requestInfo
+        .getElementsByTagName("capability").item(0);
+    assertEquals("memory does not match",
+        resourceRequest.getCapability().getMemorySize(),
+        getXmlLong(capability, "memory"));
+    assertEquals("vCores does not match",
+        resourceRequest.getCapability().getVirtualCores(),
+        getXmlLong(capability, "vCores"));
+
+    for (String expectedCustomResourceType : expectedCustomResourceTypes) {
+      assertTrue(
+          "Custom resource type " + expectedCustomResourceType
+              + " cannot be found!",
+          customResourceTypes.containsKey(expectedCustomResourceType));
+
+      Long resourceValue = customResourceTypes.get(expectedCustomResourceType);
+      assertNotNull("Resource value should not be null!", resourceValue);
+    }
+
+    Element executionTypeRequest = (Element) requestInfo
+        .getElementsByTagName("executionTypeRequest").item(0);
+    assertEquals("executionType does not match",
+        resourceRequest.getExecutionTypeRequest().getExecutionType().name(),
+        getXmlString(executionTypeRequest, "executionType"));
+    assertEquals("enforceExecutionType does not match",
+        resourceRequest.getExecutionTypeRequest().getEnforceExecutionType(),
+        getXmlBoolean(executionTypeRequest, "enforceExecutionType"));
+  }
+
+  /**
+   * Builder class for {@link ResourceRequestsXmlVerifications}.
+   */
+  public static final class Builder {
+    private List<String> expectedCustomResourceTypes = Lists.newArrayList();
+    private Map<String, Long> customResourceTypes;
+    private ResourceRequest resourceRequest;
+    private Element requestInfo;
+
+    Builder() {
+    }
+
+    public static Builder create() {
+      return new Builder();
+    }
+
+    Builder withExpectedCustomResourceTypes(
+        List<String> expectedCustomResourceTypes) {
+      this.expectedCustomResourceTypes = expectedCustomResourceTypes;
+      return this;
+    }
+
+    Builder withCustomResourceTypes(Map<String, Long> customResourceTypes) {
+      this.customResourceTypes = customResourceTypes;
+      return this;
+    }
+
+    Builder withRequest(ResourceRequest resourceRequest) {
+      this.resourceRequest = resourceRequest;
+      return this;
+    }
+
+    Builder withRequestInfo(Element requestInfo) {
+      this.requestInfo = requestInfo;
+      return this;
+    }
+
+    public ResourceRequestsXmlVerifications build() {
+      return new ResourceRequestsXmlVerifications(this);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
new file mode 100644
index 0000000..29260aa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.sun.jersey.api.client.WebResource;
+import org.apache.hadoop.http.JettyUtils;
+import org.codehaus.jettison.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Node;
+import org.xml.sax.InputSource;
+
+import javax.ws.rs.core.MediaType;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.transform.*;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.function.Consumer;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This class hides the implementation details of how to verify the structure of
+ * XML responses. Tests should only provide the path of the
+ * {@link WebResource}, the response from the resource and
+ * the verifier Consumer to
+ * {@link XmlCustomResourceTypeTestCase#verify(Consumer)}. An instance of
+ * {@link Document} will be passed to that consumer so that it can
+ * verify the response.
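+ * <p>
+ * A minimal illustrative usage (the {@code resource} and {@code response}
+ * below are placeholders for the web resource and the buffered response a
+ * test already has):
+ * <pre>{@code
+ *   new XmlCustomResourceTypeTestCase(resource, response)
+ *       .verify(document -> {
+ *         // assertions against the parsed Document go here
+ *       });
+ * }</pre>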
+ */
+public class XmlCustomResourceTypeTestCase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(XmlCustomResourceTypeTestCase.class);
+
+  private WebResource path;
+  private BufferedClientResponse response;
+  private Document parsedResponse;
+
+  public XmlCustomResourceTypeTestCase(WebResource path,
+                                       BufferedClientResponse response) {
+    this.path = path;
+    this.response = response;
+  }
+
+  public void verify(Consumer<Document> verifier) {
+    assertEquals(MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8,
+        response.getType().toString());
+
+    parsedResponse = parseXml(response);
+    logResponse(parsedResponse);
+    verifier.accept(parsedResponse);
+  }
+
+  private Document parseXml(BufferedClientResponse response) {
+    try {
+      String xml = response.getEntity(String.class);
+      DocumentBuilder db =
+          DocumentBuilderFactory.newInstance().newDocumentBuilder();
+      InputSource is = new InputSource();
+      is.setCharacterStream(new StringReader(xml));
+
+      return db.parse(is);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void logResponse(Document doc) {
+    String responseStr = response.getEntity(String.class);
+    LOG.info("Raw response from service URL {}: {}", path.toString(),
+        responseStr);
+    LOG.info("Parsed response from service URL {}: {}", path.toString(),
+        toXml(doc));
+  }
+
+  public static String toXml(Node node) {
+    StringWriter writer;
+    try {
+      TransformerFactory tf = TransformerFactory.newInstance();
+      Transformer transformer = tf.newTransformer();
+      transformer.setOutputProperty(OutputKeys.INDENT, "yes");
+      transformer.setOutputProperty(
+          "{http://xml.apache.org/xslt}indent" + "-amount", "2");
+      writer = new StringWriter();
+      transformer.transform(new DOMSource(node), new StreamResult(writer));
+    } catch (TransformerException e) {
+      throw new RuntimeException(e);
+    }
+
+    return writer.getBuffer().toString();
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[14/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
new file mode 100644
index 0000000..e50145d
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
@@ -0,0 +1,390 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.VolumeList;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.utils.BatchOperation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_USER_MAX_VOLUME_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_USER_MAX_VOLUME;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+
+/**
+ * OM volume management code.
+ */
+public class VolumeManagerImpl implements VolumeManager {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(VolumeManagerImpl.class);
+
+  private final OMMetadataManager metadataManager;
+  private final int maxUserVolumeCount;
+
+  /**
+   * Constructor.
+   * @param metadataManager - OM metadata manager to be used.
+   * @param conf - Ozone configuration.
+   * @throws IOException
+   */
+  public VolumeManagerImpl(OMMetadataManager metadataManager,
+      OzoneConfiguration conf) throws IOException {
+    this.metadataManager = metadataManager;
+    this.maxUserVolumeCount = conf.getInt(OZONE_OM_USER_MAX_VOLUME,
+        OZONE_OM_USER_MAX_VOLUME_DEFAULT);
+  }
+
+  // Helpers to add and delete volume from user list
+  private void addVolumeToOwnerList(String volume, String owner,
+      BatchOperation batchOperation) throws IOException {
+    // Get the volume list
+    byte[] dbUserKey = metadataManager.getUserKey(owner);
+    byte[] volumeList  = metadataManager.get(dbUserKey);
+    List<String> prevVolList = new LinkedList<>();
+    if (volumeList != null) {
+      VolumeList vlist = VolumeList.parseFrom(volumeList);
+      prevVolList.addAll(vlist.getVolumeNamesList());
+    }
+
+    // Check the volume count
+    if (prevVolList.size() >= maxUserVolumeCount) {
+      LOG.debug("Too many volumes for user:{}", owner);
+      throw new OMException(ResultCodes.FAILED_TOO_MANY_USER_VOLUMES);
+    }
+
+    // Add the new volume to the list
+    prevVolList.add(volume);
+    VolumeList newVolList = VolumeList.newBuilder()
+        .addAllVolumeNames(prevVolList).build();
+    batchOperation.put(dbUserKey, newVolList.toByteArray());
+  }
+
+  private void delVolumeFromOwnerList(String volume, String owner,
+                                      BatchOperation batchOperation)
+      throws IOException {
+    // Get the volume list
+    byte[] dbUserKey = metadataManager.getUserKey(owner);
+    byte[] volumeList  = metadataManager.get(dbUserKey);
+    List<String> prevVolList = new LinkedList<>();
+    if (volumeList != null) {
+      VolumeList vlist = VolumeList.parseFrom(volumeList);
+      prevVolList.addAll(vlist.getVolumeNamesList());
+    } else {
+      LOG.debug("volume:{} not found for user:{}", volume, owner);
+      throw new OMException(ResultCodes.FAILED_USER_NOT_FOUND);
+    }
+
+    // Remove the volume from the list
+    prevVolList.remove(volume);
+    if (prevVolList.size() == 0) {
+      batchOperation.delete(dbUserKey);
+    } else {
+      VolumeList newVolList = VolumeList.newBuilder()
+          .addAllVolumeNames(prevVolList).build();
+      batchOperation.put(dbUserKey, newVolList.toByteArray());
+    }
+  }
+
+  /**
+   * Creates a volume.
+   * @param args - OmVolumeArgs.
+   */
+  @Override
+  public void createVolume(OmVolumeArgs args) throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(args.getVolume());
+      byte[] volumeInfo = metadataManager.get(dbVolumeKey);
+
+      // Check if the volume already exists
+      if (volumeInfo != null) {
+        LOG.debug("volume:{} already exists", args.getVolume());
+        throw new OMException(ResultCodes.FAILED_VOLUME_ALREADY_EXISTS);
+      }
+
+      BatchOperation batch = new BatchOperation();
+      // Write the vol info
+      List<HddsProtos.KeyValue> metadataList = new LinkedList<>();
+      for (Map.Entry<String, String> entry : args.getKeyValueMap().entrySet()) {
+        metadataList.add(HddsProtos.KeyValue.newBuilder()
+            .setKey(entry.getKey()).setValue(entry.getValue()).build());
+      }
+      List<OzoneAclInfo> aclList = args.getAclMap().ozoneAclGetProtobuf();
+
+      VolumeInfo newVolumeInfo = VolumeInfo.newBuilder()
+          .setAdminName(args.getAdminName())
+          .setOwnerName(args.getOwnerName())
+          .setVolume(args.getVolume())
+          .setQuotaInBytes(args.getQuotaInBytes())
+          .addAllMetadata(metadataList)
+          .addAllVolumeAcls(aclList)
+          .setCreationTime(Time.now())
+          .build();
+      batch.put(dbVolumeKey, newVolumeInfo.toByteArray());
+
+      // Add volume to user list
+      addVolumeToOwnerList(args.getVolume(), args.getOwnerName(), batch);
+      metadataManager.writeBatch(batch);
+      LOG.debug("created volume:{} user:{}", args.getVolume(),
+          args.getOwnerName());
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Volume creation failed for user:{} volume:{}",
+            args.getOwnerName(), args.getVolume(), ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Changes the owner of a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param owner - Name of the owner.
+   * @throws IOException
+   */
+  @Override
+  public void setOwner(String volume, String owner) throws IOException {
+    Preconditions.checkNotNull(volume);
+    Preconditions.checkNotNull(owner);
+    metadataManager.writeLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("Changing volume ownership failed for user:{} volume:{}",
+            owner, volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+
+      BatchOperation batch = new BatchOperation();
+      delVolumeFromOwnerList(volume, volumeArgs.getOwnerName(), batch);
+      addVolumeToOwnerList(volume, owner, batch);
+
+      OmVolumeArgs newVolumeArgs =
+          OmVolumeArgs.newBuilder().setVolume(volumeArgs.getVolume())
+              .setAdminName(volumeArgs.getAdminName())
+              .setOwnerName(owner)
+              .setQuotaInBytes(volumeArgs.getQuotaInBytes())
+              .setCreationTime(volumeArgs.getCreationTime())
+              .build();
+
+      VolumeInfo newVolumeInfo = newVolumeArgs.getProtobuf();
+      batch.put(dbVolumeKey, newVolumeInfo.toByteArray());
+
+      metadataManager.writeBatch(batch);
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Changing volume ownership failed for user:{} volume:{}",
+            owner, volume, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Changes the Quota on a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param quota - Quota in bytes.
+   * @throws IOException
+   */
+  public void setQuota(String volume, long quota) throws IOException {
+    Preconditions.checkNotNull(volume);
+    metadataManager.writeLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("volume:{} does not exist", volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+
+      OmVolumeArgs newVolumeArgs =
+          OmVolumeArgs.newBuilder()
+              .setVolume(volumeArgs.getVolume())
+              .setAdminName(volumeArgs.getAdminName())
+              .setOwnerName(volumeArgs.getOwnerName())
+              .setQuotaInBytes(quota)
+              .setCreationTime(volumeArgs.getCreationTime()).build();
+
+      VolumeInfo newVolumeInfo = newVolumeArgs.getProtobuf();
+      metadataManager.put(dbVolumeKey, newVolumeInfo.toByteArray());
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Changing volume quota failed for volume:{} quota:{}", volume,
+            quota, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Gets the volume information.
+   * @param volume - Volume name.
+   * @return OmVolumeArgs for the volume; an exception is thrown if the
+   * volume does not exist.
+   * @throws IOException
+   */
+  public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
+    Preconditions.checkNotNull(volume);
+    metadataManager.readLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("volume:{} does not exist", volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+      return volumeArgs;
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.warn("Info volume failed for volume:{}", volume, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+
+  /**
+   * Deletes an existing empty volume.
+   *
+   * @param volume - Name of the volume.
+   * @throws IOException
+   */
+  @Override
+  public void deleteVolume(String volume) throws IOException {
+    Preconditions.checkNotNull(volume);
+    metadataManager.writeLock().lock();
+    try {
+      BatchOperation batch = new BatchOperation();
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("volume:{} does not exist", volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      if (!metadataManager.isVolumeEmpty(volume)) {
+        LOG.debug("volume:{} is not empty", volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_EMPTY);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+      // delete the volume from the owner list
+      // as well as delete the volume entry
+      delVolumeFromOwnerList(volume, volumeInfo.getOwnerName(), batch);
+      batch.delete(dbVolumeKey);
+      metadataManager.writeBatch(batch);
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Delete volume failed for volume:{}", volume, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Checks if the specified user with a role can access this volume.
+   *
+   * @param volume - volume
+   * @param userAcl - user acl which needs to be checked for access
+   * @return true if the user has access for the volume, false otherwise
+   * @throws IOException
+   */
+  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
+      throws IOException {
+    Preconditions.checkNotNull(volume);
+    Preconditions.checkNotNull(userAcl);
+    metadataManager.readLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("volume:{} does not exist", volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+      return volumeArgs.getAclMap().hasAccess(userAcl);
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Check volume access failed for volume:{} user:{} rights:{}",
+            volume, userAcl.getName(), userAcl.getRights(), ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public List<OmVolumeArgs> listVolumes(String userName, String prefix,
+      String startKey, int maxKeys) throws IOException {
+    metadataManager.readLock().lock();
+    try {
+      return metadataManager.listVolumes(
+          userName, prefix, startKey, maxKeys);
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
new file mode 100644
index 0000000..55cef97
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.exceptions;
+
+import java.io.IOException;
+
+/**
+ * Exception thrown by Ozone Manager.
+ */
+public class OMException extends IOException {
+  private final OMException.ResultCodes result;
+
+  /**
+   * Constructs an {@code OMException} with {@code null}
+   * as its error detail message.
+   */
+  public OMException(OMException.ResultCodes result) {
+    this.result = result;
+  }
+
+  /**
+   * Constructs an {@code OMException} with the specified detail message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the
+   * {@link #getMessage()} method)
+   */
+  public OMException(String message, OMException.ResultCodes result) {
+    super(message);
+    this.result = result;
+  }
+
+  /**
+   * Constructs an {@code OMException} with the specified detail message
+   * and cause.
+   * <p>
+   * <p> Note that the detail message associated with {@code cause} is
+   * <i>not</i> automatically incorporated into this exception's detail
+   * message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the
+   * {@link #getMessage()} method)
+   * @param cause The cause (which is saved for later retrieval by the {@link
+   * #getCause()} method).  (A null value is permitted, and indicates that the
+   * cause is nonexistent or unknown.)
+   * @since 1.6
+   */
+  public OMException(String message, Throwable cause,
+                     OMException.ResultCodes result) {
+    super(message, cause);
+    this.result = result;
+  }
+
+  /**
+   * Constructs an {@code OMException} with the specified cause and a
+   * detail message of {@code (cause==null ? null : cause.toString())}
+   * (which typically contains the class and detail message of {@code cause}).
+   * This constructor is useful for IO exceptions that are little more
+   * than wrappers for other throwables.
+   *
+   * @param cause The cause (which is saved for later retrieval by the {@link
+   * #getCause()} method).  (A null value is permitted, and indicates that the
+   * cause is nonexistent or unknown.)
+   * @since 1.6
+   */
+  public OMException(Throwable cause, OMException.ResultCodes result) {
+    super(cause);
+    this.result = result;
+  }
+
+  /**
+   * Returns resultCode.
+   * @return ResultCode
+   */
+  public OMException.ResultCodes getResult() {
+    return result;
+  }
+
+  /**
+   * Error codes to make it easy to decode these exceptions.
+   */
+  public enum ResultCodes {
+    FAILED_TOO_MANY_USER_VOLUMES,
+    FAILED_VOLUME_ALREADY_EXISTS,
+    FAILED_VOLUME_NOT_FOUND,
+    FAILED_VOLUME_NOT_EMPTY,
+    FAILED_USER_NOT_FOUND,
+    FAILED_BUCKET_ALREADY_EXISTS,
+    FAILED_BUCKET_NOT_FOUND,
+    FAILED_BUCKET_NOT_EMPTY,
+    FAILED_KEY_ALREADY_EXISTS,
+    FAILED_KEY_NOT_FOUND,
+    FAILED_KEY_ALLOCATION,
+    FAILED_KEY_DELETION,
+    FAILED_KEY_RENAME,
+    FAILED_INVALID_KEY_NAME,
+    FAILED_METADATA_ERROR,
+    FAILED_INTERNAL_ERROR,
+    OM_NOT_INITIALIZED,
+    SCM_VERSION_MISMATCH_ERROR
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java
new file mode 100644
index 0000000..5091545
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java
@@ -0,0 +1,19 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om.exceptions;
+// Exception thrown by OM.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java
new file mode 100644
index 0000000..7904d5d
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+/*
+ This package contains the Ozone Manager classes.
+ */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java
deleted file mode 100644
index 38e7797..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java
+++ /dev/null
@@ -1,559 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import com.google.common.collect.Lists;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.ksm.protocol.KeySpaceManagerProtocol;
-import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.AllocateBlockRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.AllocateBlockResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CommitKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CommitKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetBucketPropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetBucketPropertyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.LocateKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.LocateKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.RenameKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.RenameKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetVolumePropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetVolumePropertyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CheckVolumeAccessRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CheckVolumeAccessResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListBucketsRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListBucketsResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListKeysRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListKeysResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.Status;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServiceListRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServiceListResponse;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * This class is the server-side translator that forwards requests received on
- * {@link org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB}
- * to the KeySpaceManagerService server implementation.
- */
-public class KeySpaceManagerProtocolServerSideTranslatorPB implements
-    KeySpaceManagerProtocolPB {
-  private static final Logger LOG = LoggerFactory
-      .getLogger(KeySpaceManagerProtocolServerSideTranslatorPB.class);
-  private final KeySpaceManagerProtocol impl;
-
-  /**
-   * Constructs an instance of the server handler.
-   *
-   * @param impl KeySpaceManagerProtocolPB
-   */
-  public KeySpaceManagerProtocolServerSideTranslatorPB(
-      KeySpaceManagerProtocol impl) {
-    this.impl = impl;
-  }
-
-  // Convert and exception to corresponding status code
-  private Status exceptionToResponseStatus(IOException ex) {
-    if (ex instanceof KSMException) {
-      KSMException ksmException = (KSMException)ex;
-      switch (ksmException.getResult()) {
-      case FAILED_VOLUME_ALREADY_EXISTS:
-        return Status.VOLUME_ALREADY_EXISTS;
-      case FAILED_TOO_MANY_USER_VOLUMES:
-        return Status.USER_TOO_MANY_VOLUMES;
-      case FAILED_VOLUME_NOT_FOUND:
-        return Status.VOLUME_NOT_FOUND;
-      case FAILED_VOLUME_NOT_EMPTY:
-        return Status.VOLUME_NOT_EMPTY;
-      case FAILED_USER_NOT_FOUND:
-        return Status.USER_NOT_FOUND;
-      case FAILED_BUCKET_ALREADY_EXISTS:
-        return Status.BUCKET_ALREADY_EXISTS;
-      case FAILED_BUCKET_NOT_FOUND:
-        return Status.BUCKET_NOT_FOUND;
-      case FAILED_BUCKET_NOT_EMPTY:
-        return Status.BUCKET_NOT_EMPTY;
-      case FAILED_KEY_ALREADY_EXISTS:
-        return Status.KEY_ALREADY_EXISTS;
-      case FAILED_KEY_NOT_FOUND:
-        return Status.KEY_NOT_FOUND;
-      case FAILED_INVALID_KEY_NAME:
-        return Status.INVALID_KEY_NAME;
-      default:
-        return Status.INTERNAL_ERROR;
-      }
-    } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Unknown error occurs", ex);
-      }
-      return Status.INTERNAL_ERROR;
-    }
-  }
-
-  @Override
-  public CreateVolumeResponse createVolume(
-      RpcController controller, CreateVolumeRequest request)
-      throws ServiceException {
-    CreateVolumeResponse.Builder resp = CreateVolumeResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    try {
-      impl.createVolume(KsmVolumeArgs.getFromProtobuf(request.getVolumeInfo()));
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public SetVolumePropertyResponse setVolumeProperty(
-      RpcController controller, SetVolumePropertyRequest request)
-      throws ServiceException {
-    SetVolumePropertyResponse.Builder resp =
-        SetVolumePropertyResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    String volume = request.getVolumeName();
-
-    try {
-      if (request.hasQuotaInBytes()) {
-        long quota = request.getQuotaInBytes();
-        impl.setQuota(volume, quota);
-      } else {
-        String owner = request.getOwnerName();
-        impl.setOwner(volume, owner);
-      }
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public CheckVolumeAccessResponse checkVolumeAccess(
-      RpcController controller, CheckVolumeAccessRequest request)
-      throws ServiceException {
-    CheckVolumeAccessResponse.Builder resp =
-        CheckVolumeAccessResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    try {
-      boolean access = impl.checkVolumeAccess(request.getVolumeName(),
-          request.getUserAcl());
-      // if no access, set the response status as access denied
-      if (!access) {
-        resp.setStatus(Status.ACCESS_DENIED);
-      }
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-
-    return resp.build();
-  }
-
-  @Override
-  public InfoVolumeResponse infoVolume(
-      RpcController controller, InfoVolumeRequest request)
-      throws ServiceException {
-    InfoVolumeResponse.Builder resp = InfoVolumeResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    String volume = request.getVolumeName();
-    try {
-      KsmVolumeArgs ret = impl.getVolumeInfo(volume);
-      resp.setVolumeInfo(ret.getProtobuf());
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public DeleteVolumeResponse deleteVolume(
-      RpcController controller, DeleteVolumeRequest request)
-      throws ServiceException {
-    DeleteVolumeResponse.Builder resp = DeleteVolumeResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    try {
-      impl.deleteVolume(request.getVolumeName());
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public ListVolumeResponse listVolumes(
-      RpcController controller, ListVolumeRequest request)
-      throws ServiceException {
-    ListVolumeResponse.Builder resp = ListVolumeResponse.newBuilder();
-    List<KsmVolumeArgs> result = Lists.newArrayList();
-    try {
-      if (request.getScope()
-          == ListVolumeRequest.Scope.VOLUMES_BY_USER) {
-        result = impl.listVolumeByUser(request.getUserName(),
-            request.getPrefix(), request.getPrevKey(), request.getMaxKeys());
-      } else if (request.getScope()
-          == ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER) {
-        result = impl.listAllVolumes(request.getPrefix(), request.getPrevKey(),
-            request.getMaxKeys());
-      }
-
-      if (result == null) {
-        throw new ServiceException("Failed to get volumes for given scope "
-            + request.getScope());
-      }
-
-      result.forEach(item -> resp.addVolumeInfo(item.getProtobuf()));
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public CreateBucketResponse createBucket(
-      RpcController controller, CreateBucketRequest
-      request) throws ServiceException {
-    CreateBucketResponse.Builder resp =
-        CreateBucketResponse.newBuilder();
-    try {
-      impl.createBucket(KsmBucketInfo.getFromProtobuf(
-          request.getBucketInfo()));
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public InfoBucketResponse infoBucket(
-      RpcController controller, InfoBucketRequest request)
-      throws ServiceException {
-    InfoBucketResponse.Builder resp =
-        InfoBucketResponse.newBuilder();
-    try {
-      KsmBucketInfo ksmBucketInfo = impl.getBucketInfo(
-          request.getVolumeName(), request.getBucketName());
-      resp.setStatus(Status.OK);
-      resp.setBucketInfo(ksmBucketInfo.getProtobuf());
-    } catch(IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public LocateKeyResponse createKey(
-      RpcController controller, LocateKeyRequest request
-  ) throws ServiceException {
-    LocateKeyResponse.Builder resp =
-        LocateKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      HddsProtos.ReplicationType type =
-          keyArgs.hasType()? keyArgs.getType() : null;
-      HddsProtos.ReplicationFactor factor =
-          keyArgs.hasFactor()? keyArgs.getFactor() : null;
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .setDataSize(keyArgs.getDataSize())
-          .setType(type)
-          .setFactor(factor)
-          .build();
-      if (keyArgs.hasDataSize()) {
-        ksmKeyArgs.setDataSize(keyArgs.getDataSize());
-      } else {
-        ksmKeyArgs.setDataSize(0);
-      }
-      OpenKeySession openKey = impl.openKey(ksmKeyArgs);
-      resp.setKeyInfo(openKey.getKeyInfo().getProtobuf());
-      resp.setID(openKey.getId());
-      resp.setOpenVersion(openKey.getOpenVersion());
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public LocateKeyResponse lookupKey(
-      RpcController controller, LocateKeyRequest request
-  ) throws ServiceException {
-    LocateKeyResponse.Builder resp =
-        LocateKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .build();
-      KsmKeyInfo keyInfo = impl.lookupKey(ksmKeyArgs);
-      resp.setKeyInfo(keyInfo.getProtobuf());
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public RenameKeyResponse renameKey(
-      RpcController controller, RenameKeyRequest request)
-      throws ServiceException {
-    RenameKeyResponse.Builder resp = RenameKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .build();
-      impl.renameKey(ksmKeyArgs, request.getToKeyName());
-      resp.setStatus(Status.OK);
-    } catch (IOException e){
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public SetBucketPropertyResponse setBucketProperty(
-      RpcController controller, SetBucketPropertyRequest request)
-      throws ServiceException {
-    SetBucketPropertyResponse.Builder resp =
-        SetBucketPropertyResponse.newBuilder();
-    try {
-      impl.setBucketProperty(KsmBucketArgs.getFromProtobuf(
-          request.getBucketArgs()));
-      resp.setStatus(Status.OK);
-    } catch(IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public LocateKeyResponse deleteKey(RpcController controller,
-      LocateKeyRequest request) throws ServiceException {
-    LocateKeyResponse.Builder resp =
-        LocateKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .build();
-      impl.deleteKey(ksmKeyArgs);
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public DeleteBucketResponse deleteBucket(
-      RpcController controller, DeleteBucketRequest request)
-      throws ServiceException {
-    DeleteBucketResponse.Builder resp = DeleteBucketResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    try {
-      impl.deleteBucket(request.getVolumeName(), request.getBucketName());
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public ListBucketsResponse listBuckets(
-      RpcController controller, ListBucketsRequest request)
-      throws ServiceException {
-    ListBucketsResponse.Builder resp =
-        ListBucketsResponse.newBuilder();
-    try {
-      List<KsmBucketInfo> buckets = impl.listBuckets(
-          request.getVolumeName(),
-          request.getStartKey(),
-          request.getPrefix(),
-          request.getCount());
-      for(KsmBucketInfo bucket : buckets) {
-        resp.addBucketInfo(bucket.getProtobuf());
-      }
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public ListKeysResponse listKeys(RpcController controller,
-      ListKeysRequest request) throws ServiceException {
-    ListKeysResponse.Builder resp =
-        ListKeysResponse.newBuilder();
-    try {
-      List<KsmKeyInfo> keys = impl.listKeys(
-          request.getVolumeName(),
-          request.getBucketName(),
-          request.getStartKey(),
-          request.getPrefix(),
-          request.getCount());
-      for(KsmKeyInfo key : keys) {
-        resp.addKeyInfo(key.getProtobuf());
-      }
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public CommitKeyResponse commitKey(RpcController controller,
-      CommitKeyRequest request) throws ServiceException {
-    CommitKeyResponse.Builder resp =
-        CommitKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      HddsProtos.ReplicationType type =
-          keyArgs.hasType()? keyArgs.getType() : null;
-      HddsProtos.ReplicationFactor factor =
-          keyArgs.hasFactor()? keyArgs.getFactor() : null;
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .setDataSize(keyArgs.getDataSize())
-          .setType(type)
-          .setFactor(factor)
-          .build();
-      int id = request.getClientID();
-      impl.commitKey(ksmKeyArgs, id);
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public AllocateBlockResponse allocateBlock(RpcController controller,
-      AllocateBlockRequest request) throws ServiceException {
-    AllocateBlockResponse.Builder resp =
-        AllocateBlockResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .build();
-      int id = request.getClientID();
-      KsmKeyLocationInfo newLocation = impl.allocateBlock(ksmKeyArgs, id);
-      resp.setKeyLocation(newLocation.getProtobuf());
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public ServiceListResponse getServiceList(RpcController controller,
-      ServiceListRequest request) throws ServiceException {
-    ServiceListResponse.Builder resp = ServiceListResponse.newBuilder();
-    try {
-      resp.addAllServiceInfo(impl.getServiceList().stream()
-          .map(ServiceInfo::getProtobuf)
-          .collect(Collectors.toList()));
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
new file mode 100644
index 0000000..40a88b6
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
@@ -0,0 +1,571 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.protocolPB;
+
+import com.google.common.collect.Lists;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.AllocateBlockRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.AllocateBlockResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CommitKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CommitKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetBucketPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetBucketPropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.LocateKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.LocateKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.RenameKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.RenameKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetVolumePropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetVolumePropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CheckVolumeAccessRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CheckVolumeAccessResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListBucketsRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListBucketsResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListKeysRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListKeysResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.Status;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ServiceListRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ServiceListResponse;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * This class is the server-side translator that forwards requests received on
+ * {@link OzoneManagerProtocolPB}
+ * to the OzoneManagerService server implementation.
+ */
+public class OzoneManagerProtocolServerSideTranslatorPB implements
+    OzoneManagerProtocolPB {
+  private static final Logger LOG = LoggerFactory
+      .getLogger(OzoneManagerProtocolServerSideTranslatorPB.class);
+  private final OzoneManagerProtocol impl;
+
+  /**
+   * Constructs an instance of the server handler.
+   *
+   * @param impl OzoneManagerProtocolPB
+   */
+  public OzoneManagerProtocolServerSideTranslatorPB(
+      OzoneManagerProtocol impl) {
+    this.impl = impl;
+  }
+
+  // Convert an exception to the corresponding status code
+  private Status exceptionToResponseStatus(IOException ex) {
+    if (ex instanceof OMException) {
+      OMException omException = (OMException)ex;
+      switch (omException.getResult()) {
+      case FAILED_VOLUME_ALREADY_EXISTS:
+        return Status.VOLUME_ALREADY_EXISTS;
+      case FAILED_TOO_MANY_USER_VOLUMES:
+        return Status.USER_TOO_MANY_VOLUMES;
+      case FAILED_VOLUME_NOT_FOUND:
+        return Status.VOLUME_NOT_FOUND;
+      case FAILED_VOLUME_NOT_EMPTY:
+        return Status.VOLUME_NOT_EMPTY;
+      case FAILED_USER_NOT_FOUND:
+        return Status.USER_NOT_FOUND;
+      case FAILED_BUCKET_ALREADY_EXISTS:
+        return Status.BUCKET_ALREADY_EXISTS;
+      case FAILED_BUCKET_NOT_FOUND:
+        return Status.BUCKET_NOT_FOUND;
+      case FAILED_BUCKET_NOT_EMPTY:
+        return Status.BUCKET_NOT_EMPTY;
+      case FAILED_KEY_ALREADY_EXISTS:
+        return Status.KEY_ALREADY_EXISTS;
+      case FAILED_KEY_NOT_FOUND:
+        return Status.KEY_NOT_FOUND;
+      case FAILED_INVALID_KEY_NAME:
+        return Status.INVALID_KEY_NAME;
+      case FAILED_KEY_ALLOCATION:
+        return Status.KEY_ALLOCATION_ERROR;
+      case FAILED_KEY_DELETION:
+        return Status.KEY_DELETION_ERROR;
+      case FAILED_KEY_RENAME:
+        return Status.KEY_RENAME_ERROR;
+      case FAILED_METADATA_ERROR:
+        return Status.METADATA_ERROR;
+      case OM_NOT_INITIALIZED:
+        return Status.OM_NOT_INITIALIZED;
+      case SCM_VERSION_MISMATCH_ERROR:
+        return Status.SCM_VERSION_MISMATCH_ERROR;
+      default:
+        return Status.INTERNAL_ERROR;
+      }
+    } else {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Unknown error occurs", ex);
+      }
+      return Status.INTERNAL_ERROR;
+    }
+  }
+
+  @Override
+  public CreateVolumeResponse createVolume(
+      RpcController controller, CreateVolumeRequest request)
+      throws ServiceException {
+    CreateVolumeResponse.Builder resp = CreateVolumeResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    try {
+      impl.createVolume(OmVolumeArgs.getFromProtobuf(request.getVolumeInfo()));
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public SetVolumePropertyResponse setVolumeProperty(
+      RpcController controller, SetVolumePropertyRequest request)
+      throws ServiceException {
+    SetVolumePropertyResponse.Builder resp =
+        SetVolumePropertyResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    String volume = request.getVolumeName();
+
+    try {
+      if (request.hasQuotaInBytes()) {
+        long quota = request.getQuotaInBytes();
+        impl.setQuota(volume, quota);
+      } else {
+        String owner = request.getOwnerName();
+        impl.setOwner(volume, owner);
+      }
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public CheckVolumeAccessResponse checkVolumeAccess(
+      RpcController controller, CheckVolumeAccessRequest request)
+      throws ServiceException {
+    CheckVolumeAccessResponse.Builder resp =
+        CheckVolumeAccessResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    try {
+      boolean access = impl.checkVolumeAccess(request.getVolumeName(),
+          request.getUserAcl());
+      // if no access, set the response status as access denied
+      if (!access) {
+        resp.setStatus(Status.ACCESS_DENIED);
+      }
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+
+    return resp.build();
+  }
+
+  @Override
+  public InfoVolumeResponse infoVolume(
+      RpcController controller, InfoVolumeRequest request)
+      throws ServiceException {
+    InfoVolumeResponse.Builder resp = InfoVolumeResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    String volume = request.getVolumeName();
+    try {
+      OmVolumeArgs ret = impl.getVolumeInfo(volume);
+      resp.setVolumeInfo(ret.getProtobuf());
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public DeleteVolumeResponse deleteVolume(
+      RpcController controller, DeleteVolumeRequest request)
+      throws ServiceException {
+    DeleteVolumeResponse.Builder resp = DeleteVolumeResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    try {
+      impl.deleteVolume(request.getVolumeName());
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public ListVolumeResponse listVolumes(
+      RpcController controller, ListVolumeRequest request)
+      throws ServiceException {
+    ListVolumeResponse.Builder resp = ListVolumeResponse.newBuilder();
+    List<OmVolumeArgs> result = Lists.newArrayList();
+    try {
+      if (request.getScope()
+          == ListVolumeRequest.Scope.VOLUMES_BY_USER) {
+        result = impl.listVolumeByUser(request.getUserName(),
+            request.getPrefix(), request.getPrevKey(), request.getMaxKeys());
+      } else if (request.getScope()
+          == ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER) {
+        result = impl.listAllVolumes(request.getPrefix(), request.getPrevKey(),
+            request.getMaxKeys());
+      }
+
+      if (result == null) {
+        throw new ServiceException("Failed to get volumes for given scope "
+            + request.getScope());
+      }
+
+      result.forEach(item -> resp.addVolumeInfo(item.getProtobuf()));
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public CreateBucketResponse createBucket(
+      RpcController controller, CreateBucketRequest
+      request) throws ServiceException {
+    CreateBucketResponse.Builder resp =
+        CreateBucketResponse.newBuilder();
+    try {
+      impl.createBucket(OmBucketInfo.getFromProtobuf(
+          request.getBucketInfo()));
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public InfoBucketResponse infoBucket(
+      RpcController controller, InfoBucketRequest request)
+      throws ServiceException {
+    InfoBucketResponse.Builder resp =
+        InfoBucketResponse.newBuilder();
+    try {
+      OmBucketInfo omBucketInfo = impl.getBucketInfo(
+          request.getVolumeName(), request.getBucketName());
+      resp.setStatus(Status.OK);
+      resp.setBucketInfo(omBucketInfo.getProtobuf());
+    } catch(IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public LocateKeyResponse createKey(
+      RpcController controller, LocateKeyRequest request
+  ) throws ServiceException {
+    LocateKeyResponse.Builder resp =
+        LocateKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      HddsProtos.ReplicationType type =
+          keyArgs.hasType()? keyArgs.getType() : null;
+      HddsProtos.ReplicationFactor factor =
+          keyArgs.hasFactor()? keyArgs.getFactor() : null;
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .setDataSize(keyArgs.getDataSize())
+          .setType(type)
+          .setFactor(factor)
+          .build();
+      if (keyArgs.hasDataSize()) {
+        omKeyArgs.setDataSize(keyArgs.getDataSize());
+      } else {
+        omKeyArgs.setDataSize(0);
+      }
+      OpenKeySession openKey = impl.openKey(omKeyArgs);
+      resp.setKeyInfo(openKey.getKeyInfo().getProtobuf());
+      resp.setID(openKey.getId());
+      resp.setOpenVersion(openKey.getOpenVersion());
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public LocateKeyResponse lookupKey(
+      RpcController controller, LocateKeyRequest request
+  ) throws ServiceException {
+    LocateKeyResponse.Builder resp =
+        LocateKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .build();
+      OmKeyInfo keyInfo = impl.lookupKey(omKeyArgs);
+      resp.setKeyInfo(keyInfo.getProtobuf());
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public RenameKeyResponse renameKey(
+      RpcController controller, RenameKeyRequest request)
+      throws ServiceException {
+    RenameKeyResponse.Builder resp = RenameKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .build();
+      impl.renameKey(omKeyArgs, request.getToKeyName());
+      resp.setStatus(Status.OK);
+    } catch (IOException e){
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public SetBucketPropertyResponse setBucketProperty(
+      RpcController controller, SetBucketPropertyRequest request)
+      throws ServiceException {
+    SetBucketPropertyResponse.Builder resp =
+        SetBucketPropertyResponse.newBuilder();
+    try {
+      impl.setBucketProperty(OmBucketArgs.getFromProtobuf(
+          request.getBucketArgs()));
+      resp.setStatus(Status.OK);
+    } catch(IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public LocateKeyResponse deleteKey(RpcController controller,
+      LocateKeyRequest request) throws ServiceException {
+    LocateKeyResponse.Builder resp =
+        LocateKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .build();
+      impl.deleteKey(omKeyArgs);
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public DeleteBucketResponse deleteBucket(
+      RpcController controller, DeleteBucketRequest request)
+      throws ServiceException {
+    DeleteBucketResponse.Builder resp = DeleteBucketResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    try {
+      impl.deleteBucket(request.getVolumeName(), request.getBucketName());
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public ListBucketsResponse listBuckets(
+      RpcController controller, ListBucketsRequest request)
+      throws ServiceException {
+    ListBucketsResponse.Builder resp =
+        ListBucketsResponse.newBuilder();
+    try {
+      List<OmBucketInfo> buckets = impl.listBuckets(
+          request.getVolumeName(),
+          request.getStartKey(),
+          request.getPrefix(),
+          request.getCount());
+      for(OmBucketInfo bucket : buckets) {
+        resp.addBucketInfo(bucket.getProtobuf());
+      }
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public ListKeysResponse listKeys(RpcController controller,
+      ListKeysRequest request) throws ServiceException {
+    ListKeysResponse.Builder resp =
+        ListKeysResponse.newBuilder();
+    try {
+      List<OmKeyInfo> keys = impl.listKeys(
+          request.getVolumeName(),
+          request.getBucketName(),
+          request.getStartKey(),
+          request.getPrefix(),
+          request.getCount());
+      for(OmKeyInfo key : keys) {
+        resp.addKeyInfo(key.getProtobuf());
+      }
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public CommitKeyResponse commitKey(RpcController controller,
+      CommitKeyRequest request) throws ServiceException {
+    CommitKeyResponse.Builder resp =
+        CommitKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      HddsProtos.ReplicationType type =
+          keyArgs.hasType()? keyArgs.getType() : null;
+      HddsProtos.ReplicationFactor factor =
+          keyArgs.hasFactor()? keyArgs.getFactor() : null;
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .setDataSize(keyArgs.getDataSize())
+          .setType(type)
+          .setFactor(factor)
+          .build();
+      int id = request.getClientID();
+      impl.commitKey(omKeyArgs, id);
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public AllocateBlockResponse allocateBlock(RpcController controller,
+      AllocateBlockRequest request) throws ServiceException {
+    AllocateBlockResponse.Builder resp =
+        AllocateBlockResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .build();
+      int id = request.getClientID();
+      OmKeyLocationInfo newLocation = impl.allocateBlock(omKeyArgs, id);
+      resp.setKeyLocation(newLocation.getProtobuf());
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public ServiceListResponse getServiceList(RpcController controller,
+      ServiceListRequest request) throws ServiceException {
+    ServiceListResponse.Builder resp = ServiceListResponse.newBuilder();
+    try {
+      resp.addAllServiceInfo(impl.getServiceList().stream()
+          .map(ServiceInfo::getProtobuf)
+          .collect(Collectors.toList()));
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+}
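
The twenty-odd RPC handlers in the new translator above all follow one shape. As a condensed sketch (reusing the imports, the impl field and the exceptionToResponseStatus helper from the file in the diff; nothing outside the diff is assumed), each handler reduces to:

  // Common handler pattern in OzoneManagerProtocolServerSideTranslatorPB:
  // build the matching response, delegate to the OzoneManagerProtocol
  // implementation, and fold any IOException into a protobuf Status value.
  @Override
  public DeleteVolumeResponse deleteVolume(
      RpcController controller, DeleteVolumeRequest request)
      throws ServiceException {
    DeleteVolumeResponse.Builder resp = DeleteVolumeResponse.newBuilder();
    resp.setStatus(Status.OK);                      // optimistic default
    try {
      impl.deleteVolume(request.getVolumeName());   // delegate to the OM implementation
    } catch (IOException e) {
      resp.setStatus(exceptionToResponseStatus(e)); // OMException result -> Status enum
    }
    return resp.build();                            // errors travel in Status, not as exceptions
  }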

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
index e9c2430..9bc393d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
@@ -18,5 +18,5 @@
 package org.apache.hadoop.ozone.protocolPB;
 
 /**
- * KSM protocol buffer translators.
+ * OM protocol buffer translators.
  */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html
deleted file mode 100644
index 7f18028..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html
+++ /dev/null
@@ -1,70 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
-        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<html lang="en">
-<head>
-    <meta charset="utf-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
-    <meta name="description" content="HDFS Key Space Manager">
-
-    <title>HDFS Key Space Manager</title>
-
-    <link href="static/bootstrap-3.3.7/css/bootstrap.min.css" rel="stylesheet">
-    <link href="static/hadoop.css" rel="stylesheet">
-    <link href="static/nvd3-1.8.5.min.css" rel="stylesheet">
-
-    <link href="static/ozone.css" rel="stylesheet">
-
-</head>
-
-<body ng-app="ksm">
-
-<header class="navbar navbar-inverse navbar-fixed-top bs-docs-nav">
-    <div class="container-fluid">
-        <div class="navbar-header">
-            <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar"
-                    aria-expanded="false" aria-controls="navbar">
-                <span class="sr-only">Toggle navigation</span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-            </button>
-            <a class="navbar-brand" href="#">HDFS KSM</a>
-        </div>
-        <navmenu
-                metrics="{ 'Ksm metrics' : '#!/metrics/ksm', 'Rpc metrics' : '#!/metrics/rpc'}"></navmenu>
-    </div>
-</header>
-
-<div class="container-fluid">
-    <ng-view></ng-view>
-</div><!-- /.container -->
-
-<script src="static/jquery-3.3.1.min.js"></script>
-<script src="static/angular-1.6.4.min.js"></script>
-<script src="static/angular-route-1.6.4.min.js"></script>
-<script src="static/d3-3.5.17.min.js"></script>
-<script src="static/nvd3-1.8.5.min.js"></script>
-<script src="static/angular-nvd3-1.0.9.min.js"></script>
-<script src="static/ozone.js"></script>
-<script src="ksm.js"></script>
-<script src="static/bootstrap-3.3.7/js/bootstrap.min.js"></script>
-</body>
-</html>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html
deleted file mode 100644
index e63fb00..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html
+++ /dev/null
@@ -1,44 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<h1>KSM Metrics</h1>
-
-<div ng-repeat="(type,numbers) in $ctrl.metrics.nums">
-    <h2>{{type}}</h2>
-    <div class="container">
-        <div class="col-md-6">
-            <h3>Requests ({{numbers.ops}} ops)</h3>
-            <nvd3 options="$ctrl.graphOptions"
-                  data="numbers.all"></nvd3>
-        </div>
-        <div class="col-md-6">
-            <h3>Failures</h3>
-            <nvd3 options="$ctrl.graphOptions"
-                  data="numbers.failures"></nvd3>
-        </div>
-    </div>
-</div>
-
-<div ng-show="$ctrl.metrics.others.length > 0">
-    <h2>Other JMX properties</h2>
-
-    <table class="table">
-        <tr ng-repeat="metric in $ctrl.metrics.others">
-            <td>{{metric.key}}</td>
-            <td>{{metric.value}}</td>
-        </tr>
-    </table>
-</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js
deleted file mode 100644
index ab6f73b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-(function () {
-    "use strict";
-
-    var isIgnoredJmxKeys = function (key) {
-        return key == 'name' || key == 'modelerType' || key.match(/tag.*/);
-    };
-
-    angular.module('ksm', ['ozone', 'nvd3']);
-    angular.module('ksm').config(function ($routeProvider) {
-        $routeProvider
-            .when("/metrics/ksm", {
-                template: "<ksm-metrics></ksm-metrics>"
-            });
-    });
-    angular.module('ksm').component('ksmMetrics', {
-        templateUrl: 'ksm-metrics.html',
-        controller: function ($http) {
-            var ctrl = this;
-
-            ctrl.graphOptions = {
-                chart: {
-                    type: 'pieChart',
-                    height: 500,
-                    x: function (d) {
-                        return d.key;
-                    },
-                    y: function (d) {
-                        return d.value;
-                    },
-                    showLabels: true,
-                    labelType: 'value',
-                    duration: 500,
-                    labelThreshold: 0.01,
-                    valueFormat: function(d) {
-                        return d3.format('d')(d);
-                    },
-                    legend: {
-                        margin: {
-                            top: 5,
-                            right: 35,
-                            bottom: 5,
-                            left: 0
-                        }
-                    }
-                }
-            };
-
-
-            $http.get("jmx?qry=Hadoop:service=KeySpaceManager,name=KSMMetrics")
-                .then(function (result) {
-
-                    var groupedMetrics = {others: [], nums: {}};
-                    var metrics = result.data.beans[0]
-                    for (var key in metrics) {
-                        var numericalStatistic = key.match(/Num([A-Z][a-z]+)(.+?)(Fails)?$/);
-                        if (numericalStatistic) {
-                            var type = numericalStatistic[1];
-                            var name = numericalStatistic[2];
-                            var failed = numericalStatistic[3];
-                            groupedMetrics.nums[type] = groupedMetrics.nums[type] || {
-                                    failures: [],
-                                    all: []
-                                };
-                            if (failed) {
-                                groupedMetrics.nums[type].failures.push({
-                                    key: name,
-                                    value: metrics[key]
-                                })
-                            } else {
-                                if (name == "Ops") {
-                                    groupedMetrics.nums[type].ops = metrics[key]
-                                } else {
-                                    groupedMetrics.nums[type].all.push({
-                                        key: name,
-                                        value: metrics[key]
-                                    })
-                                }
-                            }
-                        } else if (isIgnoredJmxKeys(key)) {
-                            //ignore
-                        } else {
-                            groupedMetrics.others.push({
-                                'key': key,
-                                'value': metrics[key]
-                            });
-                        }
-                    }
-                    ctrl.metrics = groupedMetrics;
-                })
-        }
-    });
-
-})();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.css
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.css b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.css
deleted file mode 100644
index e442adc..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.css
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- *   Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
-*/
-body {
-  padding-top: 50px;
-}
-.starter-template {
-  padding: 40px 15px;
-  text-align: center;
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.html b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.html
deleted file mode 100644
index 0821899..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.html
+++ /dev/null
@@ -1,18 +0,0 @@
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-<overview>
-</overview>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html
new file mode 100644
index 0000000..ba54cb2
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html
@@ -0,0 +1,70 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<html lang="en">
+<head>
+    <meta charset="utf-8">
+    <meta http-equiv="X-UA-Compatible" content="IE=edge">
+    <meta name="viewport" content="width=device-width, initial-scale=1">
+    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
+    <meta name="description" content="Ozone Manager">
+
+    <title>Ozone Manager</title>
+
+    <link href="static/bootstrap-3.3.7/css/bootstrap.min.css" rel="stylesheet">
+    <link href="static/hadoop.css" rel="stylesheet">
+    <link href="static/nvd3-1.8.5.min.css" rel="stylesheet">
+
+    <link href="static/ozone.css" rel="stylesheet">
+
+</head>
+
+<body ng-app="ozoneManager">
+
+<header class="navbar navbar-inverse navbar-fixed-top bs-docs-nav">
+    <div class="container-fluid">
+        <div class="navbar-header">
+            <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar"
+                    aria-expanded="false" aria-controls="navbar">
+                <span class="sr-only">Toggle navigation</span>
+                <span class="icon-bar"></span>
+                <span class="icon-bar"></span>
+                <span class="icon-bar"></span>
+            </button>
+            <a class="navbar-brand" href="#">Ozone Manager</a>
+        </div>
+        <navmenu
+                metrics="{ 'OM metrics' : '#!/metrics/ozoneManager', 'Rpc metrics' : '#!/metrics/rpc'}"></navmenu>
+    </div>
+</header>
+
+<div class="container-fluid">
+    <ng-view></ng-view>
+</div><!-- /.container -->
+
+<script src="static/jquery-3.3.1.min.js"></script>
+<script src="static/angular-1.6.4.min.js"></script>
+<script src="static/angular-route-1.6.4.min.js"></script>
+<script src="static/d3-3.5.17.min.js"></script>
+<script src="static/nvd3-1.8.5.min.js"></script>
+<script src="static/angular-nvd3-1.0.9.min.js"></script>
+<script src="static/ozone.js"></script>
+<script src="ozoneManager.js"></script>
+<script src="static/bootstrap-3.3.7/js/bootstrap.min.js"></script>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.css
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.css b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.css
new file mode 100644
index 0000000..e442adc
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.css
@@ -0,0 +1,23 @@
+/**
+ *   Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+*/
+body {
+  padding-top: 50px;
+}
+.starter-template {
+  padding: 40px 15px;
+  text-align: center;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.html b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.html
new file mode 100644
index 0000000..0821899
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.html
@@ -0,0 +1,18 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+  -->
+<overview>
+</overview>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html
new file mode 100644
index 0000000..15fba2f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html
@@ -0,0 +1,44 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<h1>OzoneManager Metrics</h1>
+
+<div ng-repeat="(type,numbers) in $ctrl.metrics.nums">
+    <h2>{{type}}</h2>
+    <div class="container">
+        <div class="col-md-6">
+            <h3>Requests ({{numbers.ops}} ops)</h3>
+            <nvd3 options="$ctrl.graphOptions"
+                  data="numbers.all"></nvd3>
+        </div>
+        <div class="col-md-6">
+            <h3>Failures</h3>
+            <nvd3 options="$ctrl.graphOptions"
+                  data="numbers.failures"></nvd3>
+        </div>
+    </div>
+</div>
+
+<div ng-show="$ctrl.metrics.others.length > 0">
+    <h2>Other JMX properties</h2>
+
+    <table class="table">
+        <tr ng-repeat="metric in $ctrl.metrics.others">
+            <td>{{metric.key}}</td>
+            <td>{{metric.value}}</td>
+        </tr>
+    </table>
+</div>




[33/50] [abbrv] hadoop git commit: HDFS-13710. RBF: setQuota and getQuotaUsage should check the dfs.federation.router.quota.enable. Contributed by yanghuafeng.

Posted by bh...@apache.org.
HDFS-13710. RBF: setQuota and getQuotaUsage should check the dfs.federation.router.quota.enable. Contributed by yanghuafeng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43f7fe8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43f7fe8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43f7fe8a

Branch: refs/heads/HDDS-48
Commit: 43f7fe8aae0eca89cce4d67bfc4965fe8ce63e38
Parents: 7a68ac6
Author: Yiqun Lin <yq...@apache.org>
Authored: Mon Jul 9 15:06:07 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Mon Jul 9 15:06:07 2018 +0800

----------------------------------------------------------------------
 .../hdfs/server/federation/router/Quota.java    |  8 ++
 .../federation/router/RouterRpcServer.java      |  1 -
 .../router/TestDisableRouterQuota.java          | 94 ++++++++++++++++++++
 3 files changed, 102 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43f7fe8a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
index 413a4e1..75d3e04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
@@ -67,6 +67,9 @@ public class Quota {
   public void setQuota(String path, long namespaceQuota,
       long storagespaceQuota, StorageType type) throws IOException {
     rpcServer.checkOperation(OperationCategory.WRITE);
+    if (!router.isQuotaEnabled()) {
+      throw new IOException("The quota system is disabled in Router.");
+    }
 
     // Set quota for current path and its children mount table path.
     final List<RemoteLocation> locations = getQuotaRemoteLocations(path);
@@ -91,6 +94,11 @@ public class Quota {
    * @throws IOException
    */
   public QuotaUsage getQuotaUsage(String path) throws IOException {
+    rpcServer.checkOperation(OperationCategory.READ);
+    if (!router.isQuotaEnabled()) {
+      throw new IOException("The quota system is disabled in Router.");
+    }
+
     final List<RemoteLocation> quotaLocs = getValidQuotaLocations(path);
     RemoteMethod method = new RemoteMethod("getQuotaUsage",
         new Class<?>[] {String.class}, new RemoteParam());
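
As a usage-level illustration (not part of this commit), the new guard is visible to clients going through the Router roughly as sketched below. getQuotaUsage and the dfs.federation.router.quota.enable key come from this change; DistributedFileSystem is the standard HDFS client, while the mount path, the defaultFS assumption and the example class itself are hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RouterQuotaDisabledExample {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at the Router RPC address.
    Configuration conf = new Configuration();
    try (DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf)) {
      try {
        dfs.getQuotaUsage(new Path("/mount/data"));   // hypothetical mount point
      } catch (IOException e) {
        // With dfs.federation.router.quota.enable=false the Router now fails fast:
        // "The quota system is disabled in Router."
        System.out.println("Quota RPC rejected: " + e.getMessage());
      }
    }
  }
}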

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43f7fe8a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 716ebee..7031af7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -1996,7 +1996,6 @@ public class RouterRpcServer extends AbstractService
 
   @Override // ClientProtocol
   public QuotaUsage getQuotaUsage(String path) throws IOException {
-    checkOperation(OperationCategory.READ);
     return this.quotaCall.getQuotaUsage(path);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43f7fe8a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableRouterQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableRouterQuota.java
new file mode 100644
index 0000000..2632f59
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableRouterQuota.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+
+import java.io.IOException;
+
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
+
+/**
+ * Test the behavior when disabling the Router quota.
+ */
+public class TestDisableRouterQuota {
+
+  private static Router router;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    // Build and start a router
+    router = new Router();
+    Configuration routerConf = new RouterConfigBuilder()
+        .quota(false) // set to false to verify that quota is disabled in the Router
+        .rpc()
+        .build();
+    router.init(routerConf);
+    router.setRouterId("TestRouterId");
+    router.start();
+  }
+
+  @AfterClass
+  public static void tearDown() throws IOException {
+    if (router != null) {
+      router.stop();
+      router.close();
+    }
+  }
+
+  @Before
+  public void checkDisableQuota() {
+    assertFalse(router.isQuotaEnabled());
+  }
+
+  @Test
+  public void testSetQuota() throws Exception {
+    long nsQuota = 1024;
+    long ssQuota = 1024;
+
+    try {
+      Quota quotaModule = router.getRpcServer().getQuotaModule();
+      quotaModule.setQuota("/test", nsQuota, ssQuota, null);
+      fail("The setQuota call should fail.");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "The quota system is disabled in Router.", ioe);
+    }
+  }
+
+  @Test
+  public void testGetQuotaUsage() throws Exception {
+    try {
+      Quota quotaModule = router.getRpcServer().getQuotaModule();
+      quotaModule.getQuotaUsage("/test");
+      fail("The getQuotaUsage call should fail.");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "The quota system is disabled in Router.", ioe);
+    }
+  }
+
+}




[16/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java
deleted file mode 100644
index cc2f78a..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java
+++ /dev/null
@@ -1,391 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.VolumeList;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BatchOperation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_USER_MAX_VOLUME_DEFAULT;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_USER_MAX_VOLUME;
-import static org.apache.hadoop.ozone.ksm.exceptions
-    .KSMException.ResultCodes;
-
-/**
- * KSM volume management code.
- */
-public class VolumeManagerImpl implements VolumeManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(VolumeManagerImpl.class);
-
-  private final KSMMetadataManager metadataManager;
-  private final int maxUserVolumeCount;
-
-  /**
-   * Constructor.
-   * @param conf - Ozone configuration.
-   * @throws IOException
-   */
-  public VolumeManagerImpl(KSMMetadataManager metadataManager,
-      OzoneConfiguration conf) throws IOException {
-    this.metadataManager = metadataManager;
-    this.maxUserVolumeCount = conf.getInt(OZONE_KSM_USER_MAX_VOLUME,
-        OZONE_KSM_USER_MAX_VOLUME_DEFAULT);
-  }
-
-  // Helpers to add and delete volume from user list
-  private void addVolumeToOwnerList(String volume, String owner,
-      BatchOperation batchOperation) throws IOException {
-    // Get the volume list
-    byte[] dbUserKey = metadataManager.getUserKey(owner);
-    byte[] volumeList  = metadataManager.get(dbUserKey);
-    List<String> prevVolList = new LinkedList<>();
-    if (volumeList != null) {
-      VolumeList vlist = VolumeList.parseFrom(volumeList);
-      prevVolList.addAll(vlist.getVolumeNamesList());
-    }
-
-    // Check the volume count
-    if (prevVolList.size() >= maxUserVolumeCount) {
-      LOG.debug("Too many volumes for user:{}", owner);
-      throw new KSMException(ResultCodes.FAILED_TOO_MANY_USER_VOLUMES);
-    }
-
-    // Add the new volume to the list
-    prevVolList.add(volume);
-    VolumeList newVolList = VolumeList.newBuilder()
-        .addAllVolumeNames(prevVolList).build();
-    batchOperation.put(dbUserKey, newVolList.toByteArray());
-  }
-
-  private void delVolumeFromOwnerList(String volume, String owner,
-                                      BatchOperation batchOperation)
-      throws IOException {
-    // Get the volume list
-    byte[] dbUserKey = metadataManager.getUserKey(owner);
-    byte[] volumeList  = metadataManager.get(dbUserKey);
-    List<String> prevVolList = new LinkedList<>();
-    if (volumeList != null) {
-      VolumeList vlist = VolumeList.parseFrom(volumeList);
-      prevVolList.addAll(vlist.getVolumeNamesList());
-    } else {
-      LOG.debug("volume:{} not found for user:{}");
-      throw new KSMException(ResultCodes.FAILED_USER_NOT_FOUND);
-    }
-
-    // Remove the volume from the list
-    prevVolList.remove(volume);
-    if (prevVolList.size() == 0) {
-      batchOperation.delete(dbUserKey);
-    } else {
-      VolumeList newVolList = VolumeList.newBuilder()
-          .addAllVolumeNames(prevVolList).build();
-      batchOperation.put(dbUserKey, newVolList.toByteArray());
-    }
-  }
-
-  /**
-   * Creates a volume.
-   * @param args - KsmVolumeArgs.
-   */
-  @Override
-  public void createVolume(KsmVolumeArgs args) throws IOException {
-    Preconditions.checkNotNull(args);
-    metadataManager.writeLock().lock();
-    try {
-      byte[] dbVolumeKey = metadataManager.getVolumeKey(args.getVolume());
-      byte[] volumeInfo = metadataManager.get(dbVolumeKey);
-
-      // Check of the volume already exists
-      if (volumeInfo != null) {
-        LOG.debug("volume:{} already exists", args.getVolume());
-        throw new KSMException(ResultCodes.FAILED_VOLUME_ALREADY_EXISTS);
-      }
-
-      BatchOperation batch = new BatchOperation();
-      // Write the vol info
-      List<HddsProtos.KeyValue> metadataList = new LinkedList<>();
-      for (Map.Entry<String, String> entry : args.getKeyValueMap().entrySet()) {
-        metadataList.add(HddsProtos.KeyValue.newBuilder()
-            .setKey(entry.getKey()).setValue(entry.getValue()).build());
-      }
-      List<OzoneAclInfo> aclList = args.getAclMap().ozoneAclGetProtobuf();
-
-      VolumeInfo newVolumeInfo = VolumeInfo.newBuilder()
-          .setAdminName(args.getAdminName())
-          .setOwnerName(args.getOwnerName())
-          .setVolume(args.getVolume())
-          .setQuotaInBytes(args.getQuotaInBytes())
-          .addAllMetadata(metadataList)
-          .addAllVolumeAcls(aclList)
-          .setCreationTime(Time.now())
-          .build();
-      batch.put(dbVolumeKey, newVolumeInfo.toByteArray());
-
-      // Add volume to user list
-      addVolumeToOwnerList(args.getVolume(), args.getOwnerName(), batch);
-      metadataManager.writeBatch(batch);
-      LOG.debug("created volume:{} user:{}", args.getVolume(),
-          args.getOwnerName());
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Volume creation failed for user:{} volume:{}",
-            args.getOwnerName(), args.getVolume(), ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Changes the owner of a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  @Override
-  public void setOwner(String volume, String owner) throws IOException {
-    Preconditions.checkNotNull(volume);
-    Preconditions.checkNotNull(owner);
-    metadataManager.writeLock().lock();
-    try {
-      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
-      byte[] volInfo = metadataManager.get(dbVolumeKey);
-      if (volInfo == null) {
-        LOG.debug("Changing volume ownership failed for user:{} volume:{}",
-            owner, volume);
-        throw  new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-
-      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
-      KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo);
-      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
-
-      BatchOperation batch = new BatchOperation();
-      delVolumeFromOwnerList(volume, volumeArgs.getOwnerName(), batch);
-      addVolumeToOwnerList(volume, owner, batch);
-
-      KsmVolumeArgs newVolumeArgs =
-          KsmVolumeArgs.newBuilder().setVolume(volumeArgs.getVolume())
-              .setAdminName(volumeArgs.getAdminName())
-              .setOwnerName(owner)
-              .setQuotaInBytes(volumeArgs.getQuotaInBytes())
-              .setCreationTime(volumeArgs.getCreationTime())
-              .build();
-
-      VolumeInfo newVolumeInfo = newVolumeArgs.getProtobuf();
-      batch.put(dbVolumeKey, newVolumeInfo.toByteArray());
-
-      metadataManager.writeBatch(batch);
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Changing volume ownership failed for user:{} volume:{}",
-            owner, volume, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Changes the Quota on a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   * @throws IOException
-   */
-  public void setQuota(String volume, long quota) throws IOException {
-    Preconditions.checkNotNull(volume);
-    metadataManager.writeLock().lock();
-    try {
-      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
-      byte[] volInfo = metadataManager.get(dbVolumeKey);
-      if (volInfo == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-
-      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
-      KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo);
-      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
-
-      KsmVolumeArgs newVolumeArgs =
-          KsmVolumeArgs.newBuilder()
-              .setVolume(volumeArgs.getVolume())
-              .setAdminName(volumeArgs.getAdminName())
-              .setOwnerName(volumeArgs.getOwnerName())
-              .setQuotaInBytes(quota)
-              .setCreationTime(volumeArgs.getCreationTime()).build();
-
-      VolumeInfo newVolumeInfo = newVolumeArgs.getProtobuf();
-      metadataManager.put(dbVolumeKey, newVolumeInfo.toByteArray());
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Changing volume quota failed for volume:{} quota:{}", volume,
-            quota, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Gets the volume information.
-   * @param volume - Volume name.
-   * @return VolumeArgs or exception is thrown.
-   * @throws IOException
-   */
-  public KsmVolumeArgs getVolumeInfo(String volume) throws IOException {
-    Preconditions.checkNotNull(volume);
-    metadataManager.readLock().lock();
-    try {
-      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
-      byte[] volInfo = metadataManager.get(dbVolumeKey);
-      if (volInfo == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-
-      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
-      KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo);
-      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
-      return volumeArgs;
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.warn("Info volume failed for volume:{}", volume, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-
-  /**
-   * Deletes an existing empty volume.
-   *
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  @Override
-  public void deleteVolume(String volume) throws IOException {
-    Preconditions.checkNotNull(volume);
-    metadataManager.writeLock().lock();
-    try {
-      BatchOperation batch = new BatchOperation();
-      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
-      byte[] volInfo = metadataManager.get(dbVolumeKey);
-      if (volInfo == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-
-      if (!metadataManager.isVolumeEmpty(volume)) {
-        LOG.debug("volume:{} is not empty", volume);
-        throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_EMPTY);
-      }
-
-      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
-      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
-      // delete the volume from the owner list
-      // as well as delete the volume entry
-      delVolumeFromOwnerList(volume, volumeInfo.getOwnerName(), batch);
-      batch.delete(dbVolumeKey);
-      metadataManager.writeBatch(batch);
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Delete volume failed for volume:{}", volume, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Checks if the specified user with a role can access this volume.
-   *
-   * @param volume - volume
-   * @param userAcl - user acl which needs to be checked for access
-   * @return true if the user has access for the volume, false otherwise
-   * @throws IOException
-   */
-  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
-      throws IOException {
-    Preconditions.checkNotNull(volume);
-    Preconditions.checkNotNull(userAcl);
-    metadataManager.readLock().lock();
-    try {
-      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
-      byte[] volInfo = metadataManager.get(dbVolumeKey);
-      if (volInfo == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw  new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-
-      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
-      KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo);
-      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
-      return volumeArgs.getAclMap().hasAccess(userAcl);
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Check volume access failed for volume:{} user:{} rights:{}",
-            volume, userAcl.getName(), userAcl.getRights(), ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<KsmVolumeArgs> listVolumes(String userName,
-      String prefix, String startKey, int maxKeys) throws IOException {
-    metadataManager.readLock().lock();
-    try {
-      return metadataManager.listVolumes(
-          userName, prefix, startKey, maxKeys);
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/KSMException.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/KSMException.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/KSMException.java
deleted file mode 100644
index b902eab..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/KSMException.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.exceptions;
-
-import java.io.IOException;
-
-/**
- * Exception thrown by KSM.
- */
-public class KSMException extends IOException {
-  private final KSMException.ResultCodes result;
-
-  /**
-   * Constructs an {@code IOException} with {@code null}
-   * as its error detail message.
-   */
-  public KSMException(KSMException.ResultCodes result) {
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code IOException} with the specified detail message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the
-   * {@link #getMessage()} method)
-   */
-  public KSMException(String message, KSMException.ResultCodes result) {
-    super(message);
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code IOException} with the specified detail message
-   * and cause.
-   * <p>
-   * <p> Note that the detail message associated with {@code cause} is
-   * <i>not</i> automatically incorporated into this exception's detail
-   * message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the
-   * {@link #getMessage()} method)
-   * @param cause The cause (which is saved for later retrieval by the {@link
-   * #getCause()} method).  (A null value is permitted, and indicates that the
-   * cause is nonexistent or unknown.)
-   * @since 1.6
-   */
-  public KSMException(String message, Throwable cause,
-                      KSMException.ResultCodes result) {
-    super(message, cause);
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code IOException} with the specified cause and a
-   * detail message of {@code (cause==null ? null : cause.toString())}
-   * (which typically contains the class and detail message of {@code cause}).
-   * This constructor is useful for IO exceptions that are little more
-   * than wrappers for other throwables.
-   *
-   * @param cause The cause (which is saved for later retrieval by the {@link
-   * #getCause()} method).  (A null value is permitted, and indicates that the
-   * cause is nonexistent or unknown.)
-   * @since 1.6
-   */
-  public KSMException(Throwable cause, KSMException.ResultCodes result) {
-    super(cause);
-    this.result = result;
-  }
-
-  /**
-   * Returns resultCode.
-   * @return ResultCode
-   */
-  public KSMException.ResultCodes getResult() {
-    return result;
-  }
-
-  /**
-   * Error codes to make it easy to decode these exceptions.
-   */
-  public enum ResultCodes {
-    FAILED_TOO_MANY_USER_VOLUMES,
-    FAILED_VOLUME_ALREADY_EXISTS,
-    FAILED_VOLUME_NOT_FOUND,
-    FAILED_VOLUME_NOT_EMPTY,
-    FAILED_USER_NOT_FOUND,
-    FAILED_BUCKET_ALREADY_EXISTS,
-    FAILED_BUCKET_NOT_FOUND,
-    FAILED_BUCKET_NOT_EMPTY,
-    FAILED_KEY_ALREADY_EXISTS,
-    FAILED_KEY_NOT_FOUND,
-    FAILED_KEY_ALLOCATION,
-    FAILED_KEY_DELETION,
-    FAILED_KEY_RENAME,
-    FAILED_INVALID_KEY_NAME,
-    FAILED_METADATA_ERROR,
-    FAILED_INTERNAL_ERROR,
-    KSM_NOT_INITIALIZED,
-    SCM_VERSION_MISMATCH_ERROR
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/package-info.java
deleted file mode 100644
index 09fd87f..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.ksm.exceptions;
-// Exception thrown by KSM.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java
deleted file mode 100644
index 09d9f32..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-/*
- This package contains the keyspace manager classes.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java
new file mode 100644
index 0000000..ddb2b0e
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * BucketManager handles all the bucket level operations.
+ */
+public interface BucketManager {
+  /**
+   * Creates a bucket.
+   * @param bucketInfo - OmBucketInfo for creating bucket.
+   */
+  void createBucket(OmBucketInfo bucketInfo) throws IOException;
+  /**
+   * Returns Bucket Information.
+   * @param volumeName - Name of the Volume.
+   * @param bucketName - Name of the Bucket.
+   */
+  OmBucketInfo getBucketInfo(String volumeName, String bucketName)
+      throws IOException;
+
+  /**
+   * Sets bucket property from args.
+   * @param args - BucketArgs.
+   * @throws IOException
+   */
+  void setBucketProperty(OmBucketArgs args) throws IOException;
+
+  /**
+   * Deletes an existing empty bucket from volume.
+   * @param volumeName - Name of the volume.
+   * @param bucketName - Name of the bucket.
+   * @throws IOException
+   */
+  void deleteBucket(String volumeName, String bucketName) throws IOException;
+
+  /**
+   * Returns a list of buckets represented by {@link OmBucketInfo}
+   * in the given volume.
+   *
+   * @param volumeName
+   *   Required parameter volume name determines buckets in which volume
+   *   to return.
+   * @param startBucket
+   *   Optional start bucket name parameter indicating where to start
+   *   the bucket listing from; this bucket is excluded from the result.
+   * @param bucketPrefix
+   *   Optional prefix parameter, restricting the response to buckets
+   *   whose names begin with the specified prefix.
+   * @param maxNumOfBuckets
+   *   The maximum number of buckets to return. It ensures
+   *   the size of the result will not exceed this limit.
+   * @return a list of buckets.
+   * @throws IOException
+   */
+  List<OmBucketInfo> listBuckets(String volumeName,
+                                 String startBucket, String bucketPrefix, int maxNumOfBuckets)
+      throws IOException;
+}
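
A small paging sketch against this interface, assuming bucketManager is an initialized BucketManager and volume "vol1" already exists; the page size of 100 is a placeholder:

    // First page: no start bucket, no prefix filter.
    List<OmBucketInfo> firstPage =
        bucketManager.listBuckets("vol1", null, null, 100);

    // startBucket is excluded from the result, so the next page begins
    // strictly after the last bucket returned by the previous call.
    String lastReturned =
        firstPage.get(firstPage.size() - 1).getBucketName();
    List<OmBucketInfo> nextPage =
        bucketManager.listBuckets("vol1", lastReturned, null, 100);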

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
new file mode 100644
index 0000000..4bbce81
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
@@ -0,0 +1,315 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.util.Time;
+import org.iq80.leveldb.DBException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * OM bucket manager.
+ */
+public class BucketManagerImpl implements BucketManager {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BucketManagerImpl.class);
+
+  /**
+   * OMMetadataManager is used for accessing OM MetadataDB and ReadWriteLock.
+   */
+  private final OMMetadataManager metadataManager;
+
+  /**
+   * Constructs BucketManager.
+   * @param metadataManager
+   */
+  public BucketManagerImpl(OMMetadataManager metadataManager){
+    this.metadataManager = metadataManager;
+  }
+
+  /**
+   * MetadataDB is maintained in MetadataManager and shared between
+   * BucketManager and VolumeManager. (and also by KeyManager)
+   *
+   * BucketManager uses MetadataDB to store bucket level information.
+   *
+   * Keys used in BucketManager for storing data into MetadataDB
+   * for BucketInfo:
+   * {volume/bucket} -> bucketInfo
+   *
+   * Work flow of create bucket:
+   *
+   * -> Check if the Volume exists in metadataDB, if not throw
+   * VolumeNotFoundException.
+   * -> Else check if the Bucket exists in metadataDB, if so throw
+   * BucketExistException
+   * -> Else update MetadataDB with BucketInfo.
+   */
+
+  /**
+   * Creates a bucket.
+   * @param bucketInfo - OmBucketInfo.
+   */
+  @Override
+  public void createBucket(OmBucketInfo bucketInfo) throws IOException {
+    Preconditions.checkNotNull(bucketInfo);
+    metadataManager.writeLock().lock();
+    String volumeName = bucketInfo.getVolumeName();
+    String bucketName = bucketInfo.getBucketName();
+    try {
+      byte[] volumeKey = metadataManager.getVolumeKey(volumeName);
+      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
+
+      //Check if the volume exists
+      if (metadataManager.get(volumeKey) == null) {
+        LOG.debug("volume: {} not found ", volumeName);
+        throw new OMException("Volume doesn't exist",
+            OMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+      //Check if bucket already exists
+      if (metadataManager.get(bucketKey) != null) {
+        LOG.debug("bucket: {} already exists ", bucketName);
+        throw new OMException("Bucket already exist",
+            OMException.ResultCodes.FAILED_BUCKET_ALREADY_EXISTS);
+      }
+
+      OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+          .setVolumeName(bucketInfo.getVolumeName())
+          .setBucketName(bucketInfo.getBucketName())
+          .setAcls(bucketInfo.getAcls())
+          .setStorageType(bucketInfo.getStorageType())
+          .setIsVersionEnabled(bucketInfo.getIsVersionEnabled())
+          .setCreationTime(Time.now())
+          .build();
+      metadataManager.put(bucketKey, omBucketInfo.getProtobuf().toByteArray());
+
+      LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName);
+    } catch (IOException | DBException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Bucket creation failed for bucket:{} in volume:{}",
+            bucketName, volumeName, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Returns Bucket Information.
+   *
+   * @param volumeName - Name of the Volume.
+   * @param bucketName - Name of the Bucket.
+   */
+  @Override
+  public OmBucketInfo getBucketInfo(String volumeName, String bucketName)
+      throws IOException {
+    Preconditions.checkNotNull(volumeName);
+    Preconditions.checkNotNull(bucketName);
+    metadataManager.readLock().lock();
+    try {
+      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
+      byte[] value = metadataManager.get(bucketKey);
+      if (value == null) {
+        LOG.debug("bucket: {} not found in volume: {}.", bucketName,
+            volumeName);
+        throw new OMException("Bucket not found",
+            OMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
+      }
+      return OmBucketInfo.getFromProtobuf(BucketInfo.parseFrom(value));
+    } catch (IOException | DBException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Exception while getting bucket info for bucket: {}",
+            bucketName, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+
+  /**
+   * Sets bucket property from args.
+   * @param args - BucketArgs.
+   * @throws IOException
+   */
+  @Override
+  public void setBucketProperty(OmBucketArgs args) throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    try {
+      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
+      //Check if volume exists
+      if(metadataManager.get(metadataManager.getVolumeKey(volumeName)) ==
+          null) {
+        LOG.debug("volume: {} not found ", volumeName);
+        throw new OMException("Volume doesn't exist",
+            OMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+      byte[] value = metadataManager.get(bucketKey);
+      //Check if bucket exist
+      if(value == null) {
+        LOG.debug("bucket: {} not found ", bucketName);
+        throw new OMException("Bucket doesn't exist",
+            OMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
+      }
+      OmBucketInfo oldBucketInfo = OmBucketInfo.getFromProtobuf(
+          BucketInfo.parseFrom(value));
+      OmBucketInfo.Builder bucketInfoBuilder = OmBucketInfo.newBuilder();
+      bucketInfoBuilder.setVolumeName(oldBucketInfo.getVolumeName())
+          .setBucketName(oldBucketInfo.getBucketName());
+
+      //Check ACLs to update
+      if(args.getAddAcls() != null || args.getRemoveAcls() != null) {
+        bucketInfoBuilder.setAcls(getUpdatedAclList(oldBucketInfo.getAcls(),
+            args.getRemoveAcls(), args.getAddAcls()));
+        LOG.debug("Updating ACLs for bucket: {} in volume: {}",
+            bucketName, volumeName);
+      } else {
+        bucketInfoBuilder.setAcls(oldBucketInfo.getAcls());
+      }
+
+      //Check StorageType to update
+      StorageType storageType = args.getStorageType();
+      if (storageType != null) {
+        bucketInfoBuilder.setStorageType(storageType);
+        LOG.debug("Updating bucket storage type for bucket: {} in volume: {}",
+            bucketName, volumeName);
+      } else {
+        bucketInfoBuilder.setStorageType(oldBucketInfo.getStorageType());
+      }
+
+      //Check Versioning to update
+      Boolean versioning = args.getIsVersionEnabled();
+      if (versioning != null) {
+        bucketInfoBuilder.setIsVersionEnabled(versioning);
+        LOG.debug("Updating bucket versioning for bucket: {} in volume: {}",
+            bucketName, volumeName);
+      } else {
+        bucketInfoBuilder
+            .setIsVersionEnabled(oldBucketInfo.getIsVersionEnabled());
+      }
+      bucketInfoBuilder.setCreationTime(oldBucketInfo.getCreationTime());
+
+      metadataManager.put(bucketKey,
+          bucketInfoBuilder.build().getProtobuf().toByteArray());
+    } catch (IOException | DBException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Setting bucket property failed for bucket:{} in volume:{}",
+            bucketName, volumeName, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Updates the existing ACL list with remove and add ACLs that are passed.
+   * Remove is done before Add.
+   *
+   * @param existingAcls - old ACL list.
+   * @param removeAcls - ACLs to be removed.
+   * @param addAcls - ACLs to be added.
+   * @return updated ACL list.
+   */
+  private List<OzoneAcl> getUpdatedAclList(List<OzoneAcl> existingAcls,
+      List<OzoneAcl> removeAcls, List<OzoneAcl> addAcls) {
+    if(removeAcls != null && !removeAcls.isEmpty()) {
+      existingAcls.removeAll(removeAcls);
+    }
+    if(addAcls != null && !addAcls.isEmpty()) {
+      addAcls.stream().filter(acl -> !existingAcls.contains(acl)).forEach(
+          existingAcls::add);
+    }
+    return existingAcls;
+  }
+
+  /**
+   * Deletes an existing empty bucket from volume.
+   * @param volumeName - Name of the volume.
+   * @param bucketName - Name of the bucket.
+   * @throws IOException
+   */
+  public void deleteBucket(String volumeName, String bucketName)
+      throws IOException {
+    Preconditions.checkNotNull(volumeName);
+    Preconditions.checkNotNull(bucketName);
+    metadataManager.writeLock().lock();
+    try {
+      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
+      //Check if volume exists
+      if (metadataManager.get(metadataManager.getVolumeKey(volumeName))
+          == null) {
+        LOG.debug("volume: {} not found ", volumeName);
+        throw new OMException("Volume doesn't exist",
+            OMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+      //Check if bucket exist
+      if (metadataManager.get(bucketKey) == null) {
+        LOG.debug("bucket: {} not found ", bucketName);
+        throw new OMException("Bucket doesn't exist",
+            OMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
+      }
+      //Check if bucket is empty
+      if (!metadataManager.isBucketEmpty(volumeName, bucketName)) {
+        LOG.debug("bucket: {} is not empty ", bucketName);
+        throw new OMException("Bucket is not empty",
+            OMException.ResultCodes.FAILED_BUCKET_NOT_EMPTY);
+      }
+      metadataManager.delete(bucketKey);
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName,
+            volumeName, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public List<OmBucketInfo> listBuckets(String volumeName,
+                                        String startBucket, String bucketPrefix, int maxNumOfBuckets)
+      throws IOException {
+    Preconditions.checkNotNull(volumeName);
+    metadataManager.readLock().lock();
+    try {
+      return metadataManager.listBuckets(
+          volumeName, startBucket, bucketPrefix, maxNumOfBuckets);
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+}
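
A minimal create-then-read sketch over this implementation, assuming an already initialized OMMetadataManager (metadataManager below) and an existing volume "vol1"; the bucket properties are placeholder values:

    BucketManager bucketManager = new BucketManagerImpl(metadataManager);

    OmBucketInfo bucket = OmBucketInfo.newBuilder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setAcls(new LinkedList<>())          // no ACLs for the sketch
        .setStorageType(StorageType.DISK)
        .setIsVersionEnabled(false)
        .build();

    // Fails with FAILED_VOLUME_NOT_FOUND if vol1 is missing, and with
    // FAILED_BUCKET_ALREADY_EXISTS on a second call with the same name.
    bucketManager.createBucket(bucket);

    OmBucketInfo stored = bucketManager.getBucketInfo("vol1", "bucket1");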

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java
new file mode 100644
index 0000000..ee23fe0
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.utils.BackgroundService;
+import org.apache.hadoop.utils.BackgroundTask;
+import org.apache.hadoop.utils.BackgroundTaskQueue;
+import org.apache.hadoop.utils.BackgroundTaskResult;
+import org.apache.hadoop.utils.BackgroundTaskResult.EmptyTaskResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT;
+
+/**
+ * This is the background service that deletes keys.
+ * It periodically scans the OM metadata for keys
+ * with the "#deleting" prefix, asks SCM to delete
+ * the corresponding blocks, and cleans up each key
+ * for which SCM reports success.
+ */
+public class KeyDeletingService extends BackgroundService {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(KeyDeletingService.class);
+
+  // The thread pool size for key deleting service.
+  private final static int KEY_DELETING_CORE_POOL_SIZE = 2;
+
+  private final ScmBlockLocationProtocol scmClient;
+  private final KeyManager manager;
+  private final int keyLimitPerTask;
+
+  public KeyDeletingService(ScmBlockLocationProtocol scmClient,
+      KeyManager manager, long serviceInterval,
+      long serviceTimeout, Configuration conf) {
+    super("KeyDeletingService", serviceInterval, TimeUnit.MILLISECONDS,
+        KEY_DELETING_CORE_POOL_SIZE, serviceTimeout);
+    this.scmClient = scmClient;
+    this.manager = manager;
+    this.keyLimitPerTask = conf.getInt(OZONE_KEY_DELETING_LIMIT_PER_TASK,
+        OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT);
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    queue.add(new KeyDeletingTask());
+    return queue;
+  }
+
+  /**
+   * A key deleting task scans the OM DB looking for a bounded number of
+   * pending-deletion keys and sends these keys, along with their associated
+   * blocks, to SCM for deletion. Once SCM confirms the keys are deleted
+   * (that is, SCM has persisted the block info in its deletedBlockLog), the
+   * task removes these keys from the DB.
+   */
+  private class KeyDeletingTask implements
+      BackgroundTask<BackgroundTaskResult> {
+
+    @Override
+    public int getPriority() {
+      return 0;
+    }
+
+    @Override
+    public BackgroundTaskResult call() throws Exception {
+      try {
+        long startTime = Time.monotonicNow();
+        List<BlockGroup> keyBlocksList = manager
+            .getPendingDeletionKeys(keyLimitPerTask);
+        if (keyBlocksList.size() > 0) {
+          LOG.info("Found {} to-delete keys in OM", keyBlocksList.size());
+          List<DeleteBlockGroupResult> results =
+              scmClient.deleteKeyBlocks(keyBlocksList);
+          for (DeleteBlockGroupResult result : results) {
+            if (result.isSuccess()) {
+              try {
+                // Purge key from OM DB.
+                manager.deletePendingDeletionKey(result.getObjectKey());
+                LOG.debug("Key {} deleted from OM DB", result.getObjectKey());
+              } catch (IOException e) {
+                // If a pending-deletion key fails to delete, print a
+                // warning and retain it in this state so that the
+                // deletion can be attempted again in the next interval.
+                LOG.warn("Failed to delete pending-deletion key {}",
+                    result.getObjectKey(), e);
+              }
+            } else {
+              // Key deletion failed, retry in next interval.
+              LOG.warn("Key {} deletion failed because some of the blocks"
+                  + " were failed to delete, failed blocks: {}",
+                  result.getObjectKey(),
+                  StringUtils.join(",", result.getFailedBlocks()));
+            }
+          }
+
+          if (!results.isEmpty()) {
+            LOG.info("Number of key deleted from OM DB: {},"
+                + " task elapsed time: {}ms",
+                results.size(), Time.monotonicNow() - startTime);
+          }
+
+          return results::size;
+        } else {
+          LOG.debug("No pending deletion key found in OM");
+        }
+      } catch (IOException e) {
+        LOG.error("Unable to get pending deletion keys, retry in"
+            + " next interval", e);
+      }
+      return EmptyTaskResult.newResult();
+    }
+  }
+}
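
A wiring sketch for the service, assuming scmClient (an ScmBlockLocationProtocol) and keyManager are constructed elsewhere; the interval, timeout, and per-task key limit are placeholder values:

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setInt(OZONE_KEY_DELETING_LIMIT_PER_TASK, 1000);   // keys scanned per task

    KeyDeletingService deletingService = new KeyDeletingService(
        scmClient,                         // ScmBlockLocationProtocol to SCM
        keyManager,                        // KeyManager backing the OM DB scans
        TimeUnit.MINUTES.toMillis(1),      // serviceInterval
        TimeUnit.SECONDS.toMillis(300),    // serviceTimeout
        conf);

In the OM itself, the KeyManager implementation is expected to own this service and start it from KeyManager#start().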

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
new file mode 100644
index 0000000..226c07d
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Handles key level commands.
+ */
+public interface KeyManager {
+
+  /**
+   * Start key manager.
+   */
+  void start();
+
+  /**
+   * Stop key manager.
+   */
+  void stop() throws IOException;
+
+  /**
+   * After calling commit, the key will be made visible. There can be multiple
+   * open key writes in parallel (identified by client id). The most recently
+   * committed one will be the one visible.
+   *
+   * @param args the key to commit.
+   * @param clientID the client that is committing.
+   * @throws IOException
+   */
+  void commitKey(OmKeyArgs args, int clientID) throws IOException;
+
+  /**
+   * A client calls this on an open key, to request to allocate a new block,
+   * and appended to the tail of current block list of the open client.
+   *
+   * @param args the key to append
+   * @param clientID the client requesting block.
+   * @return the reference to the new block.
+   * @throws IOException
+   */
+  OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID)
+      throws IOException;
+  /**
+   * Given the args of a key to put, writes an open key entry to the metadata.
+   *
+   * If the container creation or the key write fails on
+   * DistributedStorageHandler, this key's metadata will still stay in OM.
+   * TODO garbage collect the open keys that never get closed
+   *
+   * @param args the args of the key provided by client.
+   * @return a OpenKeySession instance client uses to talk to container.
+   * @throws Exception
+   */
+  OpenKeySession openKey(OmKeyArgs args) throws IOException;
+
+  /**
+   * Look up an existing key. Return the info of the key to client side, which
+   * DistributedStorageHandler will use to access the data on datanode.
+   *
+   * @param args the args of the key provided by client.
+   * @return a OmKeyInfo instance client uses to talk to container.
+   * @throws IOException
+   */
+  OmKeyInfo lookupKey(OmKeyArgs args) throws IOException;
+
+  /**
+   * Renames an existing key within a bucket.
+   *
+   * @param args the args of the key provided by client.
+   * @param toKeyName New name to be used for the key
+   * @throws IOException if specified key doesn't exist or
+   * some other I/O errors while renaming the key.
+   */
+  void renameKey(OmKeyArgs args, String toKeyName) throws IOException;
+
+  /**
+   * Deletes an object by an object key. The key is immediately removed
+   * from the OM namespace and becomes invisible to clients. The object data
+   * is removed asynchronously and may be retained for some time.
+   *
+   * @param args the args of the key provided by client.
+   * @throws IOException if specified key doesn't exist or
+   * some other I/O errors while deleting an object.
+   */
+  void deleteKey(OmKeyArgs args) throws IOException;
+
+  /**
+   * Returns a list of keys represented by {@link OmKeyInfo}
+   * in the given bucket.
+   *
+   * @param volumeName
+   *   the name of the volume.
+   * @param bucketName
+   *   the name of the bucket.
+   * @param startKey
+   *   the start key name, only the keys whose name is
+   *   after this value will be included in the result.
+   *   This key is excluded from the result.
+   * @param keyPrefix
+   *   key name prefix, only the keys whose name has
+   *   this prefix will be included in the result.
+   * @param maxKeys
+   *   the maximum number of keys to return. It ensures
+   *   the size of the result will not exceed this limit.
+   * @return a list of keys.
+   * @throws IOException
+   */
+  List<OmKeyInfo> listKeys(String volumeName,
+                           String bucketName, String startKey, String keyPrefix, int maxKeys)
+      throws IOException;
+
+  /**
+   * Returns a list of pending deletion key info, up to the given count.
+   * Each entry is a {@link BlockGroup}, which contains the info about the
+   * key name and all its associated block IDs. A pending deletion key is
+   * stored with #deleting# prefix in OM DB.
+   *
+   * @param count max number of keys to return.
+   * @return a list of {@link BlockGroup} representing keys and blocks.
+   * @throws IOException
+   */
+  List<BlockGroup> getPendingDeletionKeys(int count) throws IOException;
+
+  /**
+   * Deletes a pending deletion key by its name. This is often called when a
+   * key can be safely deleted from this layer. Once called, all footprints
+   * of the key will be purged from the OM DB.
+   *
+   * @param objectKeyName object key name with #deleting# prefix.
+   * @throws IOException if specified key doesn't exist or other I/O errors.
+   */
+  void deletePendingDeletionKey(String objectKeyName) throws IOException;
+
+  /**
+   * Returns a list of all still-open key info, which contains the key name
+   * and all of its associated block IDs. A pending open key has the
+   * prefix #open# in the OM DB.
+   *
+   * @return a list of {@link BlockGroup} representing keys and blocks.
+   * @throws IOException
+   */
+  List<BlockGroup> getExpiredOpenKeys() throws IOException;
+
+  /**
+   * Deletes an expired open key by its name. Called when a hanging key has
+   * been lingering for too long. Once called, the open key entries get
+   * removed from OM metadata.
+   *
+   * @param objectKeyName object key name with #open# prefix.
+   * @throws IOException if specified key doesn't exist or other I/O errors.
+   */
+  void deleteExpiredOpenKey(String objectKeyName) throws IOException;
+}
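
A minimal usage sketch (not part of the patch) of the write path defined by
this interface, assuming the openKey/allocateBlock/commitKey methods declared
above and a caller-provided keyManager and OmKeyArgs; the getId() accessor on
OpenKeySession is an assumption for illustration.

    OpenKeySession session = keyManager.openKey(args);   // reserve an open key, may preallocate blocks
    int clientID = session.getId();                       // assumed accessor for the open-key id
    // ... client writes data; if the preallocated blocks run out:
    OmKeyLocationInfo extraBlock = keyManager.allocateBlock(args, clientID);
    // once all data is written, publish the key in the OM namespace:
    keyManager.commitKey(args, clientID);
    // later reads resolve the key back to its metadata and block locations:
    OmKeyInfo info = keyManager.lookupKey(args);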

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
new file mode 100644
index 0000000..ba92a29
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -0,0 +1,566 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.KeyInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.utils.BackgroundService;
+import org.apache.hadoop.utils.BatchOperation;
+import org.iq80.leveldb.DBException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_KEY_PREALLOCATION_MAXSIZE;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB;
+import org.apache.hadoop.hdds.protocol
+    .proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.protocol
+    .proto.HddsProtos.ReplicationFactor;
+
+
+/**
+ * Implementation of KeyManager.
+ */
+public class KeyManagerImpl implements KeyManager {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(KeyManagerImpl.class);
+
+  /**
+   * A SCM block client, used to talk to SCM to allocate block during putKey.
+   */
+  private final ScmBlockLocationProtocol scmBlockClient;
+  private final OMMetadataManager metadataManager;
+  private final long scmBlockSize;
+  private final boolean useRatis;
+  private final BackgroundService keyDeletingService;
+  private final BackgroundService openKeyCleanupService;
+
+  private final long preallocateMax;
+  private final Random random;
+  private final String omId;
+
+  public KeyManagerImpl(ScmBlockLocationProtocol scmBlockClient,
+                        OMMetadataManager metadataManager, OzoneConfiguration conf,
+                        String omId) {
+    this.scmBlockClient = scmBlockClient;
+    this.metadataManager = metadataManager;
+    this.scmBlockSize = conf.getLong(OZONE_SCM_BLOCK_SIZE_IN_MB,
+        OZONE_SCM_BLOCK_SIZE_DEFAULT) * OzoneConsts.MB;
+    this.useRatis = conf.getBoolean(DFS_CONTAINER_RATIS_ENABLED_KEY,
+        DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
+    long  blockDeleteInterval = conf.getTimeDuration(
+        OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
+        OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,
+        TimeUnit.MILLISECONDS);
+    long serviceTimeout = conf.getTimeDuration(
+        OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
+        OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
+        TimeUnit.MILLISECONDS);
+    this.preallocateMax = conf.getLong(
+        OZONE_KEY_PREALLOCATION_MAXSIZE,
+        OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT);
+    keyDeletingService = new KeyDeletingService(
+        scmBlockClient, this, blockDeleteInterval, serviceTimeout, conf);
+    int openkeyCheckInterval = conf.getInt(
+        OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS,
+        OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT);
+    openKeyCleanupService = new OpenKeyCleanupService(
+        scmBlockClient, this, openkeyCheckInterval, serviceTimeout);
+    random = new Random();
+    this.omId = omId;
+  }
+
+  @VisibleForTesting
+  public BackgroundService getOpenKeyCleanupService() {
+    return openKeyCleanupService;
+  }
+
+  @Override
+  public void start() {
+    keyDeletingService.start();
+    openKeyCleanupService.start();
+  }
+
+  @Override
+  public void stop() throws IOException {
+    keyDeletingService.shutdown();
+    openKeyCleanupService.shutdown();
+  }
+
+  private void validateBucket(String volumeName, String bucketName)
+      throws IOException {
+    byte[] volumeKey = metadataManager.getVolumeKey(volumeName);
+    byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
+
+    //Check if the volume exists
+    if(metadataManager.get(volumeKey) == null) {
+      LOG.error("volume not found: {}", volumeName);
+      throw new OMException("Volume not found",
+          OMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
+    }
+    //Check if bucket already exists
+    if(metadataManager.get(bucketKey) == null) {
+      LOG.error("bucket not found: {}/{} ", volumeName, bucketName);
+      throw new OMException("Bucket not found",
+          OMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
+    }
+  }
+
+  @Override
+  public OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID)
+      throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String keyName = args.getKeyName();
+
+    try {
+      validateBucket(volumeName, bucketName);
+      String objectKey = metadataManager.getKeyWithDBPrefix(
+          volumeName, bucketName, keyName);
+      byte[] openKey = metadataManager.getOpenKeyNameBytes(objectKey, clientID);
+      byte[] keyData = metadataManager.get(openKey);
+      if (keyData == null) {
+        LOG.error("Allocate block for a key not in open status in meta store " +
+            objectKey + " with ID " + clientID);
+        throw new OMException("Open Key not found",
+            OMException.ResultCodes.FAILED_KEY_NOT_FOUND);
+      }
+      OmKeyInfo keyInfo =
+          OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(keyData));
+      AllocatedBlock allocatedBlock =
+          scmBlockClient.allocateBlock(scmBlockSize, keyInfo.getType(),
+              keyInfo.getFactor(), omId);
+      OmKeyLocationInfo info = new OmKeyLocationInfo.Builder()
+          .setBlockID(allocatedBlock.getBlockID())
+          .setShouldCreateContainer(allocatedBlock.getCreateContainer())
+          .setLength(scmBlockSize)
+          .setOffset(0)
+          .build();
+      // current version not committed, so new blocks coming now are added to
+      // the same version
+      keyInfo.appendNewBlocks(Collections.singletonList(info));
+      keyInfo.updateModifcationTime();
+      metadataManager.put(openKey, keyInfo.getProtobuf().toByteArray());
+      return info;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public OpenKeySession openKey(OmKeyArgs args) throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String keyName = args.getKeyName();
+    ReplicationFactor factor = args.getFactor();
+    ReplicationType type = args.getType();
+
+    // If user does not specify a replication strategy or
+    // replication factor, OM will use defaults.
+    if(factor == null) {
+      factor = useRatis ? ReplicationFactor.THREE: ReplicationFactor.ONE;
+    }
+
+    if(type == null) {
+      type = useRatis ? ReplicationType.RATIS : ReplicationType.STAND_ALONE;
+    }
+
+    try {
+      validateBucket(volumeName, bucketName);
+      long requestedSize = Math.min(preallocateMax, args.getDataSize());
+      List<OmKeyLocationInfo> locations = new ArrayList<>();
+      String objectKey = metadataManager.getKeyWithDBPrefix(
+          volumeName, bucketName, keyName);
+      // The requested size is not required; it is more of an optimization:
+      // SCM looks at the requested size, and if it is 0, no block is allocated
+      // at this point; if the client needs more blocks, it can always call
+      // allocateBlock. If the requested size is not 0, OM preallocates some
+      // blocks and piggybacks them to the client, to save RPC calls.
+      while (requestedSize > 0) {
+        long allocateSize = Math.min(scmBlockSize, requestedSize);
+        AllocatedBlock allocatedBlock =
+            scmBlockClient.allocateBlock(allocateSize, type, factor, omId);
+        OmKeyLocationInfo subKeyInfo = new OmKeyLocationInfo.Builder()
+            .setBlockID(allocatedBlock.getBlockID())
+            .setShouldCreateContainer(allocatedBlock.getCreateContainer())
+            .setLength(allocateSize)
+            .setOffset(0)
+            .build();
+        locations.add(subKeyInfo);
+        requestedSize -= allocateSize;
+      }
+      // NOTE: the size of a key is not a hard limit on anything; it is the
+      // current size the client should expect for the key. If the client sets
+      // a value, that value is used; otherwise we allocate a single block,
+      // which is the current size if the key is read by the client.
+      long size = args.getDataSize() >= 0 ? args.getDataSize() : scmBlockSize;
+      byte[] keyKey = metadataManager.getDBKeyBytes(
+          volumeName, bucketName, keyName);
+      byte[] value = metadataManager.get(keyKey);
+      OmKeyInfo keyInfo;
+      long openVersion;
+      if (value != null) {
+        // the key already exist, the new blocks will be added as new version
+        keyInfo = OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(value));
+        // when locations.size = 0, the new version will have identical blocks
+        // as its previous version
+        openVersion = keyInfo.addNewVersion(locations);
+        keyInfo.setDataSize(size + keyInfo.getDataSize());
+      } else {
+        // the key does not exist, create a new object, the new blocks are the
+        // version 0
+        long currentTime = Time.now();
+        keyInfo = new OmKeyInfo.Builder()
+            .setVolumeName(args.getVolumeName())
+            .setBucketName(args.getBucketName())
+            .setKeyName(args.getKeyName())
+            .setOmKeyLocationInfos(Collections.singletonList(
+                new OmKeyLocationInfoGroup(0, locations)))
+            .setCreationTime(currentTime)
+            .setModificationTime(currentTime)
+            .setDataSize(size)
+            .setReplicationType(type)
+            .setReplicationFactor(factor)
+            .build();
+        openVersion = 0;
+      }
+      // Generate a random ID which is not already in meta db.
+      int id = -1;
+      // In general this should finish within a couple of attempts; an
+      // arbitrarily large bound is used here to avoid an infinite loop.
+      for (int j = 0; j < 10000; j++) {
+        id = random.nextInt();
+        byte[] openKey = metadataManager.getOpenKeyNameBytes(objectKey, id);
+        if (metadataManager.get(openKey) == null) {
+          metadataManager.put(openKey, keyInfo.getProtobuf().toByteArray());
+          break;
+        }
+      }
+      if (id == -1) {
+        throw new IOException("Failed to find a usable id for " + objectKey);
+      }
+      LOG.debug("Key {} allocated in volume {} bucket {}",
+          keyName, volumeName, bucketName);
+      return new OpenKeySession(id, keyInfo, openVersion);
+    } catch (OMException e) {
+      throw e;
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Key open failed for volume:{} bucket:{} key:{}",
+            volumeName, bucketName, keyName, ex);
+      }
+      throw new OMException(ex.getMessage(),
+          OMException.ResultCodes.FAILED_KEY_ALLOCATION);
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void commitKey(OmKeyArgs args, int clientID) throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String keyName = args.getKeyName();
+    try {
+      validateBucket(volumeName, bucketName);
+      String objectKey = metadataManager.getKeyWithDBPrefix(
+          volumeName, bucketName, keyName);
+      byte[] objectKeyBytes = metadataManager.getDBKeyBytes(volumeName,
+          bucketName, keyName);
+      byte[] openKey = metadataManager.getOpenKeyNameBytes(objectKey, clientID);
+      byte[] openKeyData = metadataManager.get(openKey);
+      if (openKeyData == null) {
+        throw new OMException("Commit a key without corresponding entry " +
+            DFSUtil.bytes2String(openKey), ResultCodes.FAILED_KEY_NOT_FOUND);
+      }
+      OmKeyInfo keyInfo =
+          OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(openKeyData));
+      keyInfo.setDataSize(args.getDataSize());
+      keyInfo.setModificationTime(Time.now());
+      BatchOperation batch = new BatchOperation();
+      batch.delete(openKey);
+      batch.put(objectKeyBytes, keyInfo.getProtobuf().toByteArray());
+      metadataManager.writeBatch(batch);
+    } catch (OMException e) {
+      throw e;
+    } catch (IOException ex) {
+      LOG.error("Key commit failed for volume:{} bucket:{} key:{}",
+          volumeName, bucketName, keyName, ex);
+      throw new OMException(ex.getMessage(),
+          OMException.ResultCodes.FAILED_KEY_ALLOCATION);
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String keyName = args.getKeyName();
+    try {
+      byte[] keyKey = metadataManager.getDBKeyBytes(
+          volumeName, bucketName, keyName);
+      byte[] value = metadataManager.get(keyKey);
+      if (value == null) {
+        LOG.debug("volume:{} bucket:{} Key:{} not found",
+            volumeName, bucketName, keyName);
+        throw new OMException("Key not found",
+            OMException.ResultCodes.FAILED_KEY_NOT_FOUND);
+      }
+      return OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(value));
+    } catch (DBException ex) {
+      LOG.error("Get key failed for volume:{} bucket:{} key:{}",
+          volumeName, bucketName, keyName, ex);
+      throw new OMException(ex.getMessage(),
+          OMException.ResultCodes.FAILED_KEY_NOT_FOUND);
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void renameKey(OmKeyArgs args, String toKeyName) throws IOException {
+    Preconditions.checkNotNull(args);
+    Preconditions.checkNotNull(toKeyName);
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String fromKeyName = args.getKeyName();
+    if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
+      LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}.",
+          volumeName, bucketName, fromKeyName, toKeyName);
+      throw new OMException("Key name is empty",
+          ResultCodes.FAILED_INVALID_KEY_NAME);
+    }
+
+    metadataManager.writeLock().lock();
+    try {
+      // fromKeyName should exist
+      byte[] fromKey = metadataManager.getDBKeyBytes(
+          volumeName, bucketName, fromKeyName);
+      byte[] fromKeyValue = metadataManager.get(fromKey);
+      if (fromKeyValue == null) {
+        // TODO: Add support for renaming open key
+        LOG.error(
+            "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. "
+                + "Key: {} not found.", volumeName, bucketName, fromKeyName,
+            toKeyName, fromKeyName);
+        throw new OMException("Key not found",
+            OMException.ResultCodes.FAILED_KEY_NOT_FOUND);
+      }
+
+      // toKeyName should not exist
+      byte[] toKey =
+          metadataManager.getDBKeyBytes(volumeName, bucketName, toKeyName);
+      byte[] toKeyValue = metadataManager.get(toKey);
+      if (toKeyValue != null) {
+        LOG.error(
+            "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. "
+                + "Key: {} already exists.", volumeName, bucketName,
+            fromKeyName, toKeyName, toKeyName);
+        throw new OMException("Key not found",
+            OMException.ResultCodes.FAILED_KEY_ALREADY_EXISTS);
+      }
+
+      if (fromKeyName.equals(toKeyName)) {
+        return;
+      }
+
+      OmKeyInfo newKeyInfo =
+          OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(fromKeyValue));
+      newKeyInfo.setKeyName(toKeyName);
+      newKeyInfo.updateModifcationTime();
+      BatchOperation batch = new BatchOperation();
+      batch.delete(fromKey);
+      batch.put(toKey, newKeyInfo.getProtobuf().toByteArray());
+      metadataManager.writeBatch(batch);
+    } catch (DBException ex) {
+      LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}.",
+          volumeName, bucketName, fromKeyName, toKeyName, ex);
+      throw new OMException(ex.getMessage(),
+          ResultCodes.FAILED_KEY_RENAME);
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void deleteKey(OmKeyArgs args) throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String keyName = args.getKeyName();
+    try {
+      byte[] objectKey = metadataManager.getDBKeyBytes(
+          volumeName, bucketName, keyName);
+      byte[] objectValue = metadataManager.get(objectKey);
+      if (objectValue == null) {
+        throw new OMException("Key not found",
+            OMException.ResultCodes.FAILED_KEY_NOT_FOUND);
+      }
+      byte[] deletingKey = metadataManager.getDeletedKeyName(objectKey);
+      BatchOperation batch = new BatchOperation();
+      batch.put(deletingKey, objectValue);
+      batch.delete(objectKey);
+      metadataManager.writeBatch(batch);
+    } catch (DBException ex) {
+      LOG.error(String.format("Delete key failed for volume:%s "
+          + "bucket:%s key:%s", volumeName, bucketName, keyName), ex);
+      throw new OMException(ex.getMessage(), ex,
+          ResultCodes.FAILED_KEY_DELETION);
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
+                                  String startKey, String keyPrefix, int maxKeys) throws IOException {
+    Preconditions.checkNotNull(volumeName);
+    Preconditions.checkNotNull(bucketName);
+
+    metadataManager.readLock().lock();
+    try {
+      return metadataManager.listKeys(volumeName, bucketName,
+          startKey, keyPrefix, maxKeys);
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+
+  @Override
+  public List<BlockGroup> getPendingDeletionKeys(final int count)
+      throws IOException {
+    metadataManager.readLock().lock();
+    try {
+      return metadataManager.getPendingDeletionKeys(count);
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+
+  @Override
+  public void deletePendingDeletionKey(String objectKeyName)
+      throws IOException{
+    Preconditions.checkNotNull(objectKeyName);
+    if (!objectKeyName.startsWith(OzoneConsts.DELETING_KEY_PREFIX)) {
+      throw new IllegalArgumentException("Invalid key name,"
+          + " the name should be the key name with deleting prefix");
+    }
+
+    // Simply removes the entry from OM DB.
+    metadataManager.writeLock().lock();
+    try {
+      byte[] pendingDelKey = DFSUtil.string2Bytes(objectKeyName);
+      byte[] delKeyValue = metadataManager.get(pendingDelKey);
+      if (delKeyValue == null) {
+        throw new IOException("Failed to delete key " + objectKeyName
+            + " because it is not found in DB");
+      }
+      metadataManager.delete(pendingDelKey);
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public List<BlockGroup> getExpiredOpenKeys() throws IOException {
+    metadataManager.readLock().lock();
+    try {
+      return metadataManager.getExpiredOpenKeys();
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+
+  @Override
+  public void deleteExpiredOpenKey(String objectKeyName) throws IOException {
+    Preconditions.checkNotNull(objectKeyName);
+    if (!objectKeyName.startsWith(OzoneConsts.OPEN_KEY_PREFIX)) {
+      throw new IllegalArgumentException("Invalid key name,"
+          + " the name should be the key name with open key prefix");
+    }
+
+    // Simply removes the entry from OM DB.
+    metadataManager.writeLock().lock();
+    try {
+      byte[] openKey = DFSUtil.string2Bytes(objectKeyName);
+      byte[] delKeyValue = metadataManager.get(openKey);
+      if (delKeyValue == null) {
+        throw new IOException("Failed to delete key " + objectKeyName
+            + " because it is not found in DB");
+      }
+      metadataManager.delete(openKey);
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+}
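
A worked illustration (not part of the patch) of the preallocation loop in
openKey() above: the requested size is first capped by preallocateMax and then
split into chunks of at most scmBlockSize, one allocateBlock call per chunk.
The sizes below are hypothetical.

    long scmBlockSize   = 256L * 1024 * 1024;              // assume 256 MB SCM blocks
    long preallocateMax = 32L * scmBlockSize;               // assume an 8 GB preallocation cap
    long requestedSize  = Math.min(preallocateMax, 600L * 1024 * 1024);
    int blocks = 0;
    while (requestedSize > 0) {
      long allocateSize = Math.min(scmBlockSize, requestedSize);
      blocks++;                                             // one block of allocateSize bytes
      requestedSize -= allocateSize;
    }
    // A 600 MB request yields blocks == 3: 256 MB + 256 MB + 88 MB.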

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java
new file mode 100644
index 0000000..3ab9f47
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.server.ServiceRuntimeInfo;
+
+/**
+ * This is the JMX management interface for OM information.
+ */
+@InterfaceAudience.Private
+public interface OMMXBean extends ServiceRuntimeInfo {
+
+  String getRpcPort();
+}
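
A sketch (not part of the patch) of how a service exposing OMMXBean could
publish it over JMX with Hadoop's MBeans helper; the service and bean names
below are assumptions.

    import javax.management.ObjectName;
    import org.apache.hadoop.metrics2.util.MBeans;

    final class OmMXBeanRegistration {
      private ObjectName beanName;

      void register(OMMXBean bean) {
        // Publish the bean under an assumed service/bean name pair.
        beanName = MBeans.register("OzoneManager", "OzoneManagerInfo", bean);
      }

      void unregister() {
        if (beanName != null) {
          MBeans.unregister(beanName);
          beanName = null;
        }
      }
    }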

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
new file mode 100644
index 0000000..f2e78e6
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -0,0 +1,253 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.utils.BatchOperation;
+import org.apache.hadoop.utils.MetadataStore;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.locks.Lock;
+
+/**
+ * OM metadata manager interface.
+ */
+public interface OMMetadataManager {
+  /**
+   * Start metadata manager.
+   */
+  void start();
+
+  /**
+   * Stop metadata manager.
+   */
+  void stop() throws IOException;
+
+  /**
+   * Get metadata store.
+   * @return metadata store.
+   */
+  @VisibleForTesting
+  MetadataStore getStore();
+
+  /**
+   * Returns the read lock used on Metadata DB.
+   * @return readLock
+   */
+  Lock readLock();
+
+  /**
+   * Returns the write lock used on Metadata DB.
+   * @return writeLock
+   */
+  Lock writeLock();
+
+  /**
+   * Returns the value associated with this key.
+   * @param key - key
+   * @return value
+   */
+  byte[] get(byte[] key) throws IOException;
+
+  /**
+   * Puts a Key into Metadata DB.
+   * @param key   - key
+   * @param value - value
+   */
+  void put(byte[] key, byte[] value) throws IOException;
+
+  /**
+   * Deletes a Key from Metadata DB.
+   * @param key   - key
+   */
+  void delete(byte[] key) throws IOException;
+
+  /**
+   * Atomically writes a batch of operations.
+   * @param batch
+   * @throws IOException
+   */
+  void writeBatch(BatchOperation batch) throws IOException;
+
+  /**
+   * Given a volume return the corresponding DB key.
+   * @param volume - Volume name
+   */
+  byte[] getVolumeKey(String volume);
+
+  /**
+   * Given a user return the corresponding DB key.
+   * @param user - User name
+   */
+  byte[] getUserKey(String user);
+
+  /**
+   * Given a volume and bucket, return the corresponding DB key.
+   * @param volume - User name
+   * @param bucket - Bucket name
+   */
+  byte[] getBucketKey(String volume, String bucket);
+
+  /**
+   * Given a volume, bucket and a key, return the corresponding DB key.
+   * @param volume - volume name
+   * @param bucket - bucket name
+   * @param key - key name
+   * @return bytes of DB key.
+   */
+  byte[] getDBKeyBytes(String volume, String bucket, String key);
+
+  /**
+   * Returns the DB key name of a deleted key in OM metadata store.
+   * The name for a deleted key has prefix #deleting# followed by
+   * the actual key name.
+   * @param keyName - key name
+   * @return bytes of DB key.
+   */
+  byte[] getDeletedKeyName(byte[] keyName);
+
+  /**
+   * Returns the DB key name of an open key in the OM metadata store.
+   * The name is the #open# prefix followed by the actual key name.
+   * @param keyName - key name
+   * @param id - the id for this open
+   * @return bytes of DB key.
+   */
+  byte[] getOpenKeyNameBytes(String keyName, int id);
+
+  /**
+   * Returns the full name of a key given the volume name, bucket name and key name.
+   * Generally constructed by joining the parts with certain delimiters.
+   *
+   * @param volumeName - volume name
+   * @param bucketName - bucket name
+   * @param keyName - key name
+   * @return the full key name.
+   */
+  String getKeyWithDBPrefix(String volumeName, String bucketName,
+      String keyName);
+
+  /**
+   * Given a volume, check if it is empty,
+   * i.e there are no buckets inside it.
+   * @param volume - Volume name
+   */
+  boolean isVolumeEmpty(String volume) throws IOException;
+
+  /**
+   * Given a volume/bucket, check if it is empty,
+   * i.e there are no keys inside it.
+   * @param volume - Volume name
+   * @param  bucket - Bucket name
+   * @return true if the bucket is empty
+   */
+  boolean isBucketEmpty(String volume, String bucket) throws IOException;
+
+  /**
+   * Returns a list of buckets represented by {@link OmBucketInfo}
+   * in the given volume.
+   *
+   * @param volumeName
+   *   the name of the volume. This argument is required,
+   *   this method returns buckets in this given volume.
+   * @param startBucket
+   *   the start bucket name. Only the buckets whose name is
+   *   after this value will be included in the result.
+   *   This key is excluded from the result.
+   * @param bucketPrefix
+   *   bucket name prefix. Only the buckets whose name has
+   *   this prefix will be included in the result.
+   * @param maxNumOfBuckets
+   *   the maximum number of buckets to return. It ensures
+   *   the size of the result will not exceed this limit.
+   * @return a list of buckets.
+   * @throws IOException
+   */
+  List<OmBucketInfo> listBuckets(String volumeName, String startBucket,
+                                 String bucketPrefix, int maxNumOfBuckets) throws IOException;
+
+  /**
+   * Returns a list of keys represented by {@link OmKeyInfo}
+   * in the given bucket.
+   *
+   * @param volumeName
+   *   the name of the volume.
+   * @param bucketName
+   *   the name of the bucket.
+   * @param startKey
+   *   the start key name, only the keys whose name is
+   *   after this value will be included in the result.
+   *   This key is excluded from the result.
+   * @param keyPrefix
+   *   key name prefix, only the keys whose name has
+   *   this prefix will be included in the result.
+   * @param maxKeys
+   *   the maximum number of keys to return. It ensures
+   *   the size of the result will not exceed this limit.
+   * @return a list of keys.
+   * @throws IOException
+   */
+  List<OmKeyInfo> listKeys(String volumeName,
+                           String bucketName, String startKey, String keyPrefix, int maxKeys)
+      throws IOException;
+
+  /**
+   * Returns a list of volumes owned by a given user; if user is null,
+   * returns all volumes.
+   *
+   * @param userName
+   *   volume owner
+   * @param prefix
+   *   the volume prefix used to filter the listing result.
+   * @param startKey
+   *   the start volume name determines where to start listing from,
+   *   this key is excluded from the result.
+   * @param maxKeys
+   *   the maximum number of volumes to return.
+   * @return a list of {@link OmVolumeArgs}
+   * @throws IOException
+   */
+  List<OmVolumeArgs> listVolumes(String userName, String prefix,
+                                 String startKey, int maxKeys) throws IOException;
+
+  /**
+   * Returns a list of pending-deletion key info, up to the given count.
+   * Each entry is a {@link BlockGroup}, which contains the info about the
+   * key name and all its associated block IDs. A pending deletion key is
+   * stored with #deleting# prefix in OM DB.
+   *
+   * @param count max number of keys to return.
+   * @return a list of {@link BlockGroup} representing keys and blocks.
+   * @throws IOException
+   */
+  List<BlockGroup> getPendingDeletionKeys(int count) throws IOException;
+
+  /**
+   * Returns a list of all still-open key info, which contains the
+   * key name and all its associated block IDs. A pending open key has
+   * prefix #open# in OM DB.
+   *
+   * @return a list of {@link BlockGroup} representing keys and blocks.
+   * @throws IOException
+   */
+  List<BlockGroup> getExpiredOpenKeys() throws IOException;
+}
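
For illustration only (not part of the patch): how the #deleting# and #open#
prefixed DB key names described above might be composed. The delimiter layout
of the unprefixed key and the separator before the open-key id are assumptions;
the real layout is an implementation detail behind this interface.

    int clientID = 42;                                      // hypothetical open-key id
    String objectKey = "vol1/bucket1/key1";                 // assumed shape of getKeyWithDBPrefix() output
    // getDeletedKeyName(): #deleting# prefix + original key name
    byte[] deletedKey = DFSUtil.string2Bytes("#deleting#" + objectKey);
    // getOpenKeyNameBytes(): #open# prefix + key name + the id of this open
    byte[] openKey = DFSUtil.string2Bytes("#open#" + objectKey + "#" + clientID);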




[47/50] [abbrv] hadoop git commit: Revert "HDDS-224. Create metrics for Event Watcher."

Posted by bh...@apache.org.
Revert "HDDS-224. Create metrics for Event Watcher."

This reverts commit cb5e225868a069d6d16244b462ebada44465dce8.
The JIRA number is wrong, reverting to fix it.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c0a66ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c0a66ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c0a66ab

Branch: refs/heads/HDDS-48
Commit: 3c0a66abe632277e89fccd8dced9e71ca5d87df0
Parents: cb5e225
Author: Anu Engineer <ae...@apache.org>
Authored: Mon Jul 9 13:03:57 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Mon Jul 9 13:03:57 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdds/server/events/EventQueue.java   | 108 ++++++++-----------
 .../server/events/SingleThreadExecutor.java     |  35 ++----
 .../hdds/server/events/TestEventQueue.java      |  35 +++++-
 3 files changed, 87 insertions(+), 91 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c0a66ab/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
index 7e29223..44d85f5 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -18,11 +18,7 @@
 package org.apache.hadoop.hdds.server.events;
 
 import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-
-import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -46,8 +42,6 @@ public class EventQueue implements EventPublisher, AutoCloseable {
   private static final Logger LOG =
       LoggerFactory.getLogger(EventQueue.class);
 
-  private static final String EXECUTOR_NAME_SEPARATOR = "For";
-
   private final Map<Event, Map<EventExecutor, List<EventHandler>>> executors =
       new HashMap<>();
 
@@ -57,73 +51,37 @@ public class EventQueue implements EventPublisher, AutoCloseable {
 
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
       EVENT_TYPE event, EventHandler<PAYLOAD> handler) {
-    this.addHandler(event, handler, generateHandlerName(handler));
-  }
-
-  /**
-   * Add new handler to the event queue.
-   * <p>
-   * By default a separate single-thread executor will be dedicated to
-   * delivering the events to the registered event handler.
-   *
-   * @param event        Triggering event.
-   * @param handler      Handler of event (will be called from a separated
-   *                     thread)
-   * @param handlerName  The name of handler (should be unique together with
-   *                     the event name)
-   * @param <PAYLOAD>    The type of the event payload.
-   * @param <EVENT_TYPE> The type of the event identifier.
-   */
-  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
-      EVENT_TYPE event, EventHandler<PAYLOAD> handler, String handlerName) {
-    validateEvent(event);
-    Preconditions.checkNotNull(handler, "Handler name should not be null.");
-    String executorName =
-        StringUtils.camelize(event.getName()) + EXECUTOR_NAME_SEPARATOR
-            + handlerName;
-    this.addHandler(event, new SingleThreadExecutor<>(executorName), handler);
-  }
-
-  private <EVENT_TYPE extends Event<?>> void validateEvent(EVENT_TYPE event) {
-    Preconditions
-        .checkArgument(!event.getName().contains(EXECUTOR_NAME_SEPARATOR),
-            "Event name should not contain " + EXECUTOR_NAME_SEPARATOR
-                + " string.");
 
+    this.addHandler(event, new SingleThreadExecutor<>(
+        event.getName()), handler);
   }
 
-  private <PAYLOAD> String generateHandlerName(EventHandler<PAYLOAD> handler) {
-    if (!"".equals(handler.getClass().getSimpleName())) {
-      return handler.getClass().getSimpleName();
-    } else {
-      return handler.getClass().getName();
-    }
-  }
-
-  /**
-   * Add event handler with custom executor.
-   *
-   * @param event        Triggering event.
-   * @param executor     The executor implementation to deliver events from a
-   *                     separate thread. Please keep in mind that
-   *                     registering metrics is the responsibility of the
-   *                     caller.
-   * @param handler      Handler of event (will be called from a separated
-   *                     thread)
-   * @param <PAYLOAD>    The type of the event payload.
-   * @param <EVENT_TYPE> The type of the event identifier.
-   */
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
-      EVENT_TYPE event, EventExecutor<PAYLOAD> executor,
+      EVENT_TYPE event,
+      EventExecutor<PAYLOAD> executor,
       EventHandler<PAYLOAD> handler) {
-    validateEvent(event);
+
     executors.putIfAbsent(event, new HashMap<>());
     executors.get(event).putIfAbsent(executor, new ArrayList<>());
 
-    executors.get(event).get(executor).add(handler);
+    executors.get(event)
+        .get(executor)
+        .add(handler);
   }
 
+  /**
+   * Creates one executor with multiple event handlers.
+   */
+  public void addHandlerGroup(String name, HandlerForEvent<?>...
+      eventsAndHandlers) {
+    SingleThreadExecutor sharedExecutor =
+        new SingleThreadExecutor(name);
+    for (HandlerForEvent handlerForEvent : eventsAndHandlers) {
+      addHandler(handlerForEvent.event, sharedExecutor,
+          handlerForEvent.handler);
+    }
 
+  }
 
   /**
    * Route an event with payload to the right listener(s).
@@ -225,5 +183,31 @@ public class EventQueue implements EventPublisher, AutoCloseable {
     });
   }
 
+  /**
+   * Event identifier together with the handler.
+   *
+   * @param <PAYLOAD>
+   */
+  public static class HandlerForEvent<PAYLOAD> {
+
+    private final Event<PAYLOAD> event;
+
+    private final EventHandler<PAYLOAD> handler;
+
+    public HandlerForEvent(
+        Event<PAYLOAD> event,
+        EventHandler<PAYLOAD> handler) {
+      this.event = event;
+      this.handler = handler;
+    }
+
+    public Event<PAYLOAD> getEvent() {
+      return event;
+    }
+
+    public EventHandler<PAYLOAD> getHandler() {
+      return handler;
+    }
+  }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c0a66ab/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
index 3253f2d..a64e3d7 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
@@ -23,18 +23,13 @@ import org.slf4j.LoggerFactory;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import java.util.concurrent.atomic.AtomicLong;
 
 /**
  * Simple EventExecutor to call all the event handler one-by-one.
  *
  * @param <T>
  */
-@Metrics(context = "EventQueue")
 public class SingleThreadExecutor<T> implements EventExecutor<T> {
 
   public static final String THREAD_NAME_PREFIX = "EventQueue";
@@ -46,24 +41,14 @@ public class SingleThreadExecutor<T> implements EventExecutor<T> {
 
   private final ThreadPoolExecutor executor;
 
-  @Metric
-  private MutableCounterLong queued;
+  private final AtomicLong queuedCount = new AtomicLong(0);
 
-  @Metric
-  private MutableCounterLong done;
+  private final AtomicLong successfulCount = new AtomicLong(0);
 
-  @Metric
-  private MutableCounterLong failed;
+  private final AtomicLong failedCount = new AtomicLong(0);
 
-  /**
-   * Create SingleThreadExecutor.
-   *
-   * @param name Unique name used in monitoring and metrics.
-   */
   public SingleThreadExecutor(String name) {
     this.name = name;
-    DefaultMetricsSystem.instance()
-        .register("EventQueue" + name, "Event Executor metrics ", this);
 
     LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>();
     executor =
@@ -79,31 +64,31 @@ public class SingleThreadExecutor<T> implements EventExecutor<T> {
   @Override
   public void onMessage(EventHandler<T> handler, T message, EventPublisher
       publisher) {
-    queued.incr();
+    queuedCount.incrementAndGet();
     executor.execute(() -> {
       try {
         handler.onMessage(message, publisher);
-        done.incr();
+        successfulCount.incrementAndGet();
       } catch (Exception ex) {
         LOG.error("Error on execution message {}", message, ex);
-        failed.incr();
+        failedCount.incrementAndGet();
       }
     });
   }
 
   @Override
   public long failedEvents() {
-    return failed.value();
+    return failedCount.get();
   }
 
   @Override
   public long successfulEvents() {
-    return done.value();
+    return successfulCount.get();
   }
 
   @Override
   public long queuedEvents() {
-    return queued.value();
+    return queuedCount.get();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c0a66ab/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
index 2bdf705..3944409 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
@@ -25,8 +25,6 @@ import org.junit.Test;
 import java.util.Set;
 import java.util.stream.Collectors;
 
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-
 /**
  * Testing the basic functionality of the event queue.
  */
@@ -46,13 +44,11 @@ public class TestEventQueue {
 
   @Before
   public void startEventQueue() {
-    DefaultMetricsSystem.initialize(getClass().getSimpleName());
     queue = new EventQueue();
   }
 
   @After
   public void stopEventQueue() {
-    DefaultMetricsSystem.shutdown();
     queue.close();
   }
 
@@ -83,4 +79,35 @@ public class TestEventQueue {
 
   }
 
+  @Test
+  public void handlerGroup() {
+    final long[] result = new long[2];
+    queue.addHandlerGroup(
+        "group",
+        new EventQueue.HandlerForEvent<>(EVENT3, (payload, publisher) ->
+            result[0] = payload),
+        new EventQueue.HandlerForEvent<>(EVENT4, (payload, publisher) ->
+            result[1] = payload)
+    );
+
+    queue.fireEvent(EVENT3, 23L);
+    queue.fireEvent(EVENT4, 42L);
+
+    queue.processAll(1000);
+
+    Assert.assertEquals(23, result[0]);
+    Assert.assertEquals(42, result[1]);
+
+    Set<String> eventQueueThreadNames =
+        Thread.getAllStackTraces().keySet()
+            .stream()
+            .filter(t -> t.getName().startsWith(SingleThreadExecutor
+                .THREAD_NAME_PREFIX))
+            .map(Thread::getName)
+            .collect(Collectors.toSet());
+    System.out.println(eventQueueThreadNames);
+    Assert.assertEquals(1, eventQueueThreadNames.size());
+
+  }
+
 }
\ No newline at end of file




[46/50] [abbrv] hadoop git commit: HDDS-224. Create metrics for Event Watcher. Contributed by Elek, Marton.

Posted by bh...@apache.org.
HDDS-224. Create metrics for Event Watcher.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb5e2258
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb5e2258
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb5e2258

Branch: refs/heads/HDDS-48
Commit: cb5e225868a069d6d16244b462ebada44465dce8
Parents: 4a08ddf
Author: Anu Engineer <ae...@apache.org>
Authored: Mon Jul 9 12:52:39 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Mon Jul 9 13:02:40 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdds/server/events/EventQueue.java   | 108 +++++++++++--------
 .../server/events/SingleThreadExecutor.java     |  35 ++++--
 .../hdds/server/events/TestEventQueue.java      |  35 +-----
 3 files changed, 91 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5e2258/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
index 44d85f5..7e29223 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -18,7 +18,11 @@
 package org.apache.hadoop.hdds.server.events;
 
 import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+
+import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -42,6 +46,8 @@ public class EventQueue implements EventPublisher, AutoCloseable {
   private static final Logger LOG =
       LoggerFactory.getLogger(EventQueue.class);
 
+  private static final String EXECUTOR_NAME_SEPARATOR = "For";
+
   private final Map<Event, Map<EventExecutor, List<EventHandler>>> executors =
       new HashMap<>();
 
@@ -51,38 +57,74 @@ public class EventQueue implements EventPublisher, AutoCloseable {
 
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
       EVENT_TYPE event, EventHandler<PAYLOAD> handler) {
-
-    this.addHandler(event, new SingleThreadExecutor<>(
-        event.getName()), handler);
+    this.addHandler(event, handler, generateHandlerName(handler));
   }
 
+  /**
+   * Add new handler to the event queue.
+   * <p>
+   * By default a separate single-thread executor will be dedicated to
+   * delivering the events to the registered event handler.
+   *
+   * @param event        Triggering event.
+   * @param handler      Handler of event (will be called from a separated
+   *                     thread)
+   * @param handlerName  The name of handler (should be unique together with
+   *                     the event name)
+   * @param <PAYLOAD>    The type of the event payload.
+   * @param <EVENT_TYPE> The type of the event identifier.
+   */
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
-      EVENT_TYPE event,
-      EventExecutor<PAYLOAD> executor,
-      EventHandler<PAYLOAD> handler) {
+      EVENT_TYPE event, EventHandler<PAYLOAD> handler, String handlerName) {
+    validateEvent(event);
+    Preconditions.checkNotNull(handler, "Handler name should not be null.");
+    String executorName =
+        StringUtils.camelize(event.getName()) + EXECUTOR_NAME_SEPARATOR
+            + handlerName;
+    this.addHandler(event, new SingleThreadExecutor<>(executorName), handler);
+  }
 
-    executors.putIfAbsent(event, new HashMap<>());
-    executors.get(event).putIfAbsent(executor, new ArrayList<>());
+  private <EVENT_TYPE extends Event<?>> void validateEvent(EVENT_TYPE event) {
+    Preconditions
+        .checkArgument(!event.getName().contains(EXECUTOR_NAME_SEPARATOR),
+            "Event name should not contain " + EXECUTOR_NAME_SEPARATOR
+                + " string.");
 
-    executors.get(event)
-        .get(executor)
-        .add(handler);
+  }
+
+  private <PAYLOAD> String generateHandlerName(EventHandler<PAYLOAD> handler) {
+    if (!"".equals(handler.getClass().getSimpleName())) {
+      return handler.getClass().getSimpleName();
+    } else {
+      return handler.getClass().getName();
+    }
   }
 
   /**
-   * Creates one executor with multiple event handlers.
+   * Add event handler with custom executor.
+   *
+   * @param event        Triggering event.
+   * @param executor     The executor implementation to deliver events from a
+   *                     separate thread. Please keep in mind that
+   *                     registering metrics is the responsibility of the
+   *                     caller.
+   * @param handler      Handler of event (will be called from a separated
+   *                     thread)
+   * @param <PAYLOAD>    The type of the event payload.
+   * @param <EVENT_TYPE> The type of the event identifier.
    */
-  public void addHandlerGroup(String name, HandlerForEvent<?>...
-      eventsAndHandlers) {
-    SingleThreadExecutor sharedExecutor =
-        new SingleThreadExecutor(name);
-    for (HandlerForEvent handlerForEvent : eventsAndHandlers) {
-      addHandler(handlerForEvent.event, sharedExecutor,
-          handlerForEvent.handler);
-    }
+  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
+      EVENT_TYPE event, EventExecutor<PAYLOAD> executor,
+      EventHandler<PAYLOAD> handler) {
+    validateEvent(event);
+    executors.putIfAbsent(event, new HashMap<>());
+    executors.get(event).putIfAbsent(executor, new ArrayList<>());
 
+    executors.get(event).get(executor).add(handler);
   }
 
+
+
   /**
    * Route an event with payload to the right listener(s).
    *
@@ -183,31 +225,5 @@ public class EventQueue implements EventPublisher, AutoCloseable {
     });
   }
 
-  /**
-   * Event identifier together with the handler.
-   *
-   * @param <PAYLOAD>
-   */
-  public static class HandlerForEvent<PAYLOAD> {
-
-    private final Event<PAYLOAD> event;
-
-    private final EventHandler<PAYLOAD> handler;
-
-    public HandlerForEvent(
-        Event<PAYLOAD> event,
-        EventHandler<PAYLOAD> handler) {
-      this.event = event;
-      this.handler = handler;
-    }
-
-    public Event<PAYLOAD> getEvent() {
-      return event;
-    }
-
-    public EventHandler<PAYLOAD> getHandler() {
-      return handler;
-    }
-  }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5e2258/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
index a64e3d7..3253f2d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
@@ -23,13 +23,18 @@ import org.slf4j.LoggerFactory;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 
 /**
  * Simple EventExecutor to call all the event handler one-by-one.
  *
  * @param <T>
  */
+@Metrics(context = "EventQueue")
 public class SingleThreadExecutor<T> implements EventExecutor<T> {
 
   public static final String THREAD_NAME_PREFIX = "EventQueue";
@@ -41,14 +46,24 @@ public class SingleThreadExecutor<T> implements EventExecutor<T> {
 
   private final ThreadPoolExecutor executor;
 
-  private final AtomicLong queuedCount = new AtomicLong(0);
+  @Metric
+  private MutableCounterLong queued;
 
-  private final AtomicLong successfulCount = new AtomicLong(0);
+  @Metric
+  private MutableCounterLong done;
 
-  private final AtomicLong failedCount = new AtomicLong(0);
+  @Metric
+  private MutableCounterLong failed;
 
+  /**
+   * Create SingleThreadExecutor.
+   *
+   * @param name Unique name used in monitoring and metrics.
+   */
   public SingleThreadExecutor(String name) {
     this.name = name;
+    DefaultMetricsSystem.instance()
+        .register("EventQueue" + name, "Event Executor metrics ", this);
 
     LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>();
     executor =
@@ -64,31 +79,31 @@ public class SingleThreadExecutor<T> implements EventExecutor<T> {
   @Override
   public void onMessage(EventHandler<T> handler, T message, EventPublisher
       publisher) {
-    queuedCount.incrementAndGet();
+    queued.incr();
     executor.execute(() -> {
       try {
         handler.onMessage(message, publisher);
-        successfulCount.incrementAndGet();
+        done.incr();
       } catch (Exception ex) {
         LOG.error("Error on execution message {}", message, ex);
-        failedCount.incrementAndGet();
+        failed.incr();
       }
     });
   }
 
   @Override
   public long failedEvents() {
-    return failedCount.get();
+    return failed.value();
   }
 
   @Override
   public long successfulEvents() {
-    return successfulCount.get();
+    return done.value();
   }
 
   @Override
   public long queuedEvents() {
-    return queuedCount.get();
+    return queued.value();
   }
 
   @Override
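
The hunks above swap the hand-rolled AtomicLong counters for annotation-driven metrics2 counters that are injected when the executor registers itself with the default metrics system. A minimal, self-contained sketch of that pattern follows; the class and metric names are illustrative, not part of the patch:

    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;

    @Metrics(context = "EventQueue")
    class ExampleQueueMetrics {

      // Injected by the metrics system during register(); never constructed by hand.
      @Metric
      private MutableCounterLong queued;

      ExampleQueueMetrics(String name) {
        DefaultMetricsSystem.instance()
            .register("Example" + name, "Example executor metrics", this);
      }

      void markQueued() {
        queued.incr();           // was: queuedCount.incrementAndGet()
      }

      long queuedEvents() {
        return queued.value();   // was: queuedCount.get()
      }
    }

The successful and failed counters follow the same incr()/value() pairing; the only extra requirement is that DefaultMetricsSystem.initialize(...) runs somewhere before the first registration, which is what the test change below takes care of.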

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5e2258/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
index 3944409..2bdf705 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
@@ -25,6 +25,8 @@ import org.junit.Test;
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+
 /**
  * Testing the basic functionality of the event queue.
  */
@@ -44,11 +46,13 @@ public class TestEventQueue {
 
   @Before
   public void startEventQueue() {
+    DefaultMetricsSystem.initialize(getClass().getSimpleName());
     queue = new EventQueue();
   }
 
   @After
   public void stopEventQueue() {
+    DefaultMetricsSystem.shutdown();
     queue.close();
   }
 
@@ -79,35 +83,4 @@ public class TestEventQueue {
 
   }
 
-  @Test
-  public void handlerGroup() {
-    final long[] result = new long[2];
-    queue.addHandlerGroup(
-        "group",
-        new EventQueue.HandlerForEvent<>(EVENT3, (payload, publisher) ->
-            result[0] = payload),
-        new EventQueue.HandlerForEvent<>(EVENT4, (payload, publisher) ->
-            result[1] = payload)
-    );
-
-    queue.fireEvent(EVENT3, 23L);
-    queue.fireEvent(EVENT4, 42L);
-
-    queue.processAll(1000);
-
-    Assert.assertEquals(23, result[0]);
-    Assert.assertEquals(42, result[1]);
-
-    Set<String> eventQueueThreadNames =
-        Thread.getAllStackTraces().keySet()
-            .stream()
-            .filter(t -> t.getName().startsWith(SingleThreadExecutor
-                .THREAD_NAME_PREFIX))
-            .map(Thread::getName)
-            .collect(Collectors.toSet());
-    System.out.println(eventQueueThreadNames);
-    Assert.assertEquals(1, eventQueueThreadNames.size());
-
-  }
-
 }
\ No newline at end of file
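
Because SingleThreadExecutor now registers a metrics source from its constructor, the test wraps each case in a metrics-system lifecycle. A hedged, stand-alone sketch of that lifecycle, with a placeholder test class and body:

    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class ExampleMetricsLifecycleTest {

      @Before
      public void setUp() {
        // Must run before constructing anything that self-registers metrics.
        DefaultMetricsSystem.initialize(getClass().getSimpleName());
      }

      @After
      public void tearDown() {
        // Clears registered sources so later tests can reuse the same names.
        DefaultMetricsSystem.shutdown();
      }

      @Test
      public void objectsThatRegisterMetricsCanBeCreated() {
        // Code that calls DefaultMetricsSystem.instance().register(...)
        // in a constructor would be created and exercised here.
      }
    }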




[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk

Posted by bh...@apache.org.
Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da507afa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da507afa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da507afa

Branch: refs/heads/HDDS-48
Commit: da507afa351f9257960937e852c588e261c423b6
Parents: 021ab63 cb5e225
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Mon Jul 9 13:06:46 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Mon Jul 9 13:06:46 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdds/server/events/EventQueue.java   | 108 +++++++++++--------
 .../server/events/SingleThreadExecutor.java     |  35 ++++--
 .../hdds/server/events/TestEventQueue.java      |  35 +-----
 3 files changed, 91 insertions(+), 87 deletions(-)
----------------------------------------------------------------------





[39/50] [abbrv] hadoop git commit: HDDS-224. Create metrics for Event Watcher. Contributed by Elek, Marton.

Posted by bh...@apache.org.
HDDS-224. Create metrics for Event Watcher.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e12d93bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e12d93bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e12d93bf

Branch: refs/heads/HDDS-48
Commit: e12d93bfc1a0efd007bc84758e60b5149c3aa663
Parents: 895845e
Author: Anu Engineer <ae...@apache.org>
Authored: Mon Jul 9 12:02:20 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Mon Jul 9 12:10:12 2018 -0700

----------------------------------------------------------------------
 hadoop-hdds/framework/pom.xml                   |   5 +
 .../hadoop/hdds/server/events/EventWatcher.java |  43 +++++++-
 .../hdds/server/events/EventWatcherMetrics.java |  79 ++++++++++++++
 .../hdds/server/events/TestEventWatcher.java    | 107 ++++++++++++++++---
 4 files changed, 220 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e12d93bf/hadoop-hdds/framework/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml
index a497133..6e1927d 100644
--- a/hadoop-hdds/framework/pom.xml
+++ b/hadoop-hdds/framework/pom.xml
@@ -39,6 +39,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>hadoop-hdds-common</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e12d93bf/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
index 19fddde..8c5605a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
@@ -26,12 +26,17 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
 
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.lease.Lease;
 import org.apache.hadoop.ozone.lease.LeaseAlreadyExistException;
 import org.apache.hadoop.ozone.lease.LeaseExpiredException;
 import org.apache.hadoop.ozone.lease.LeaseManager;
 import org.apache.hadoop.ozone.lease.LeaseNotFoundException;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.commons.collections.map.HashedMap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -58,18 +63,39 @@ public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
 
   private final LeaseManager<UUID> leaseManager;
 
+  private final EventWatcherMetrics metrics;
+
+  private final String name;
+
   protected final Map<UUID, TIMEOUT_PAYLOAD> trackedEventsByUUID =
       new ConcurrentHashMap<>();
 
   protected final Set<TIMEOUT_PAYLOAD> trackedEvents = new HashSet<>();
 
-  public EventWatcher(Event<TIMEOUT_PAYLOAD> startEvent,
+  private final Map<UUID, Long> startTrackingTimes = new HashedMap();
+
+  public EventWatcher(String name, Event<TIMEOUT_PAYLOAD> startEvent,
       Event<COMPLETION_PAYLOAD> completionEvent,
       LeaseManager<UUID> leaseManager) {
     this.startEvent = startEvent;
     this.completionEvent = completionEvent;
     this.leaseManager = leaseManager;
+    this.metrics = new EventWatcherMetrics();
+    Preconditions.checkNotNull(name);
+    if (name.equals("")) {
+      name = getClass().getSimpleName();
+    }
+    if (name.equals("")) {
+      //for anonymous inner classes
+      name = getClass().getName();
+    }
+    this.name = name;
+  }
 
+  public EventWatcher(Event<TIMEOUT_PAYLOAD> startEvent,
+      Event<COMPLETION_PAYLOAD> completionEvent,
+      LeaseManager<UUID> leaseManager) {
+    this("", startEvent, completionEvent, leaseManager);
   }
 
   public void start(EventQueue queue) {
@@ -87,11 +113,16 @@ public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
       }
     });
 
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    ms.register(name, "EventWatcher metrics", metrics);
   }
 
   private synchronized void handleStartEvent(TIMEOUT_PAYLOAD payload,
       EventPublisher publisher) {
+    metrics.incrementTrackedEvents();
     UUID identifier = payload.getUUID();
+    startTrackingTimes.put(identifier, System.currentTimeMillis());
+
     trackedEventsByUUID.put(identifier, payload);
     trackedEvents.add(payload);
     try {
@@ -112,16 +143,21 @@ public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
 
   private synchronized void handleCompletion(UUID uuid,
       EventPublisher publisher) throws LeaseNotFoundException {
+    metrics.incrementCompletedEvents();
     leaseManager.release(uuid);
     TIMEOUT_PAYLOAD payload = trackedEventsByUUID.remove(uuid);
     trackedEvents.remove(payload);
+    long originalTime = startTrackingTimes.remove(uuid);
+    metrics.updateFinishingTime(System.currentTimeMillis() - originalTime);
     onFinished(publisher, payload);
   }
 
   private synchronized void handleTimeout(EventPublisher publisher,
       UUID identifier) {
+    metrics.incrementTimedOutEvents();
     TIMEOUT_PAYLOAD payload = trackedEventsByUUID.remove(identifier);
     trackedEvents.remove(payload);
+    startTrackingTimes.remove(payload.getUUID());
     onTimeout(publisher, payload);
   }
 
@@ -154,4 +190,9 @@ public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
     return trackedEventsByUUID.values().stream().filter(predicate)
         .collect(Collectors.toList());
   }
+
+  @VisibleForTesting
+  protected EventWatcherMetrics getMetrics() {
+    return metrics;
+  }
 }
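
With the name-aware constructor above, a watcher can be registered under a stable metrics name; an empty name falls back to the simple class name, and to the fully qualified name for anonymous classes. A hedged usage sketch, to be read as a fragment inside a suitably imported test or driver method; the event constants, payload types, and lease timeout are placeholders borrowed from the test below:

    LeaseManager<UUID> leaseManager = new LeaseManager<>(2000L);
    leaseManager.start();

    EventQueue queue = new EventQueue();

    EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent> watcher =
        new EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>(
            "ReplicationWatcher", WATCH_UNDER_REPLICATED,
            REPLICATION_COMPLETED, leaseManager) {

          @Override
          void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) {
            // Lease expired before completion: re-fire so the work is retried.
            publisher.fireEvent(UNDER_REPLICATED, payload);
          }

          @Override
          void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) {
            // Completion event arrived in time; nothing left to do.
          }
        };

    // start() wires the start/completion handlers into the queue and registers
    // the watcher as a metrics source under the name given above.
    watcher.start(queue);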

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e12d93bf/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java
new file mode 100644
index 0000000..1db81a9
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Metrics for any event watcher.
+ */
+public class EventWatcherMetrics {
+
+  @Metric()
+  private MutableCounterLong trackedEvents;
+
+  @Metric()
+  private MutableCounterLong timedOutEvents;
+
+  @Metric()
+  private MutableCounterLong completedEvents;
+
+  @Metric()
+  private MutableRate completionTime;
+
+  public void incrementTrackedEvents() {
+    trackedEvents.incr();
+  }
+
+  public void incrementTimedOutEvents() {
+    timedOutEvents.incr();
+  }
+
+  public void incrementCompletedEvents() {
+    completedEvents.incr();
+  }
+
+  @VisibleForTesting
+  public void updateFinishingTime(long duration) {
+    completionTime.add(duration);
+  }
+
+  @VisibleForTesting
+  public MutableCounterLong getTrackedEvents() {
+    return trackedEvents;
+  }
+
+  @VisibleForTesting
+  public MutableCounterLong getTimedOutEvents() {
+    return timedOutEvents;
+  }
+
+  @VisibleForTesting
+  public MutableCounterLong getCompletedEvents() {
+    return completedEvents;
+  }
+
+  @VisibleForTesting
+  public MutableRate getCompletionTime() {
+    return completionTime;
+  }
+}
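
The counters and rate above are standard metrics2 mutables, so besides the @VisibleForTesting getters they can also be checked through the usual test helpers. The sketch below is an assumption-laden alternative, not what the patch does: it presumes the watcher was registered under the name passed to its constructor ("TestCommandWatcher" in the test further down) and that metric names default to the capitalized field names:

    import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
    import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

    import org.apache.hadoop.metrics2.MetricsRecordBuilder;

    // ... inside a test, after the events have been fired and processed:
    MetricsRecordBuilder rb = getMetrics("TestCommandWatcher");
    assertCounter("TrackedEvents", 3L, rb);    // three start events were seen
    assertCounter("CompletedEvents", 1L, rb);  // one completion arrived in time
    assertCounter("TimedOutEvents", 2L, rb);   // two leases expired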

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e12d93bf/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
index 1731350..38e1554 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
@@ -21,8 +21,13 @@ import java.util.List;
 import java.util.Objects;
 import java.util.UUID;
 
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.lease.LeaseManager;
+import org.apache.hadoop.test.MetricsAsserts;
 
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -46,6 +51,7 @@ public class TestEventWatcher {
 
   @Before
   public void startLeaseManager() {
+    DefaultMetricsSystem.instance();
     leaseManager = new LeaseManager<>(2000l);
     leaseManager.start();
   }
@@ -53,12 +59,12 @@ public class TestEventWatcher {
   @After
   public void stopLeaseManager() {
     leaseManager.shutdown();
+    DefaultMetricsSystem.shutdown();
   }
 
 
   @Test
   public void testEventHandling() throws InterruptedException {
-
     EventQueue queue = new EventQueue();
 
     EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
@@ -139,26 +145,101 @@ public class TestEventWatcher {
     Assert.assertEquals(0, c1todo.size());
     Assert.assertFalse(replicationWatcher.contains(event1));
 
+  }
+
+  @Test
+  public void testMetrics() throws InterruptedException {
+
+    DefaultMetricsSystem.initialize("test");
+
+    EventQueue queue = new EventQueue();
+
+    EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
+        replicationWatcher = createEventWatcher();
+
+    EventHandlerStub<UnderreplicatedEvent> underReplicatedEvents =
+        new EventHandlerStub<>();
+
+    queue.addHandler(UNDER_REPLICATED, underReplicatedEvents);
+
+    replicationWatcher.start(queue);
+
+    //send 3 events to track 3 in-progress activities
+    UnderreplicatedEvent event1 =
+        new UnderreplicatedEvent(UUID.randomUUID(), "C1");
+
+    UnderreplicatedEvent event2 =
+        new UnderreplicatedEvent(UUID.randomUUID(), "C2");
+
+    UnderreplicatedEvent event3 =
+        new UnderreplicatedEvent(UUID.randomUUID(), "C1");
+
+    queue.fireEvent(WATCH_UNDER_REPLICATED, event1);
+
+    queue.fireEvent(WATCH_UNDER_REPLICATED, event2);
+
+    queue.fireEvent(WATCH_UNDER_REPLICATED, event3);
+
+    //1st event is completed, don't need to track any more
+    ReplicationCompletedEvent event1Completed =
+        new ReplicationCompletedEvent(event1.UUID, "C1", "D1");
+
+    queue.fireEvent(REPLICATION_COMPLETED, event1Completed);
+
+
+    Thread.sleep(2200l);
+
+    //until now: 3 in-progress activities are tracked with three
+    // UnderreplicatedEvents. The first one is completed, the remaining two
+    // are timed out (as the timeout -- defined in the LeaseManager -- is 2000ms).
 
+    EventWatcherMetrics metrics = replicationWatcher.getMetrics();
+
+    //3 events are received
+    Assert.assertEquals(3, metrics.getTrackedEvents().value());
+
+    //one is finished. doesn't need to be resent
+    Assert.assertEquals(1, metrics.getCompletedEvents().value());
+
+    //Other two are timed out and resent
+    Assert.assertEquals(2, metrics.getTimedOutEvents().value());
+
+    DefaultMetricsSystem.shutdown();
   }
 
   private EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
   createEventWatcher() {
-    return new EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>(
-        WATCH_UNDER_REPLICATED, REPLICATION_COMPLETED, leaseManager) {
+    return new CommandWatcherExample(WATCH_UNDER_REPLICATED,
+        REPLICATION_COMPLETED, leaseManager);
+  }
 
-      @Override
-      void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) {
-        publisher.fireEvent(UNDER_REPLICATED, payload);
-      }
+  private class CommandWatcherExample
+      extends EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent> {
 
-      @Override
-      void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) {
-        //Good job. We did it.
-      }
-    };
+    public CommandWatcherExample(Event<UnderreplicatedEvent> startEvent,
+        Event<ReplicationCompletedEvent> completionEvent,
+        LeaseManager<UUID> leaseManager) {
+      super("TestCommandWatcher", startEvent, completionEvent, leaseManager);
+    }
+
+    @Override
+    void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) {
+      publisher.fireEvent(UNDER_REPLICATED, payload);
+    }
+
+    @Override
+    void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) {
+      //Good job. We did it.
+    }
+
+    @Override
+    public EventWatcherMetrics getMetrics() {
+      return super.getMetrics();
+    }
   }
 
+  ;
+
   private static class ReplicationCompletedEvent
       implements IdentifiableEventPayload {
 
@@ -217,4 +298,4 @@ public class TestEventWatcher {
     }
   }
 
-}
\ No newline at end of file
+}




[06/50] [abbrv] hadoop git commit: Revert "Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk"

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
deleted file mode 100644
index 924411a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
-
-import com.google.common.collect.Sets;
-import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
-import org.codehaus.jettison.json.JSONArray;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-
-import java.util.List;
-import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * This test helper class is primarily used by
- * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}.
- */
-public class FairSchedulerJsonVerifications {
-
-  private static final Set<String> RESOURCE_FIELDS =
-      Sets.newHashSet("minResources", "amUsedResources", "amMaxResources",
-          "fairResources", "clusterResources", "reservedResources",
-              "maxResources", "usedResources", "steadyFairResources",
-              "demandResources");
-  private final Set<String> customResourceTypes;
-
-  FairSchedulerJsonVerifications(List<String> customResourceTypes) {
-    this.customResourceTypes = Sets.newHashSet(customResourceTypes);
-  }
-
-  public void verify(JSONObject jsonObject) {
-    try {
-      verifyResourcesContainDefaultResourceTypes(jsonObject, RESOURCE_FIELDS);
-      verifyResourcesContainCustomResourceTypes(jsonObject, RESOURCE_FIELDS);
-    } catch (JSONException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  private void verifyResourcesContainDefaultResourceTypes(JSONObject queue,
-      Set<String> resourceCategories) throws JSONException {
-    for (String resourceCategory : resourceCategories) {
-      boolean hasResourceCategory = queue.has(resourceCategory);
-      assertTrue("Queue " + queue + " does not have resource category key: "
-          + resourceCategory, hasResourceCategory);
-      verifyResourceContainsDefaultResourceTypes(
-          queue.getJSONObject(resourceCategory));
-    }
-  }
-
-  private void verifyResourceContainsDefaultResourceTypes(
-      JSONObject jsonObject) {
-    Object memory = jsonObject.opt("memory");
-    Object vCores = jsonObject.opt("vCores");
-
-    assertNotNull("Key 'memory' not found in: " + jsonObject, memory);
-    assertNotNull("Key 'vCores' not found in: " + jsonObject, vCores);
-  }
-
-  private void verifyResourcesContainCustomResourceTypes(JSONObject queue,
-      Set<String> resourceCategories) throws JSONException {
-    for (String resourceCategory : resourceCategories) {
-      assertTrue("Queue " + queue + " does not have resource category key: "
-          + resourceCategory, queue.has(resourceCategory));
-      verifyResourceContainsAllCustomResourceTypes(
-          queue.getJSONObject(resourceCategory));
-    }
-  }
-
-  private void verifyResourceContainsAllCustomResourceTypes(
-      JSONObject resourceCategory) throws JSONException {
-    assertTrue("resourceCategory does not have resourceInformations: "
-        + resourceCategory, resourceCategory.has("resourceInformations"));
-
-    JSONObject resourceInformations =
-        resourceCategory.getJSONObject("resourceInformations");
-    assertTrue(
-        "resourceInformations does not have resourceInformation object: "
-            + resourceInformations,
-        resourceInformations.has("resourceInformation"));
-    JSONArray customResources =
-        resourceInformations.getJSONArray("resourceInformation");
-
-    // customResources will include vcores / memory as well
-    assertEquals(
-        "Different number of custom resource types found than expected",
-        customResourceTypes.size(), customResources.length() - 2);
-
-    for (int i = 0; i < customResources.length(); i++) {
-      JSONObject customResource = customResources.getJSONObject(i);
-      assertTrue("Resource type does not have name field: " + customResource,
-          customResource.has("name"));
-      assertTrue("Resource type does not have name resourceType field: "
-          + customResource, customResource.has("resourceType"));
-      assertTrue(
-          "Resource type does not have name units field: " + customResource,
-          customResource.has("units"));
-      assertTrue(
-          "Resource type does not have name value field: " + customResource,
-          customResource.has("value"));
-
-      String name = customResource.getString("name");
-      String unit = customResource.getString("units");
-      String resourceType = customResource.getString("resourceType");
-      Long value = customResource.getLong("value");
-
-      if (ResourceInformation.MEMORY_URI.equals(name)
-          || ResourceInformation.VCORES_URI.equals(name)) {
-        continue;
-      }
-
-      assertTrue("Custom resource type " + name + " not found",
-          customResourceTypes.contains(name));
-      assertEquals("k", unit);
-      assertEquals(ResourceTypes.COUNTABLE,
-          ResourceTypes.valueOf(resourceType));
-      assertNotNull("Custom resource value " + value + " is null!", value);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
deleted file mode 100644
index 63ae7b7..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
-
-
-import com.google.common.collect.Sets;
-import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-
-import java.util.List;
-import java.util.Set;
-
-import static org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase.toXml;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * This test helper class is primarily used by
- * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}.
- */
-public class FairSchedulerXmlVerifications {
-
-  private static final Set<String> RESOURCE_FIELDS = Sets.newHashSet(
-      "minResources", "amUsedResources", "amMaxResources", "fairResources",
-      "clusterResources", "reservedResources", "maxResources", "usedResources",
-      "steadyFairResources", "demandResources");
-  private final Set<String> customResourceTypes;
-
-  FairSchedulerXmlVerifications(List<String> customResourceTypes) {
-    this.customResourceTypes = Sets.newHashSet(customResourceTypes);
-  }
-
-  public void verify(Element element) {
-    verifyResourcesContainDefaultResourceTypes(element, RESOURCE_FIELDS);
-    verifyResourcesContainCustomResourceTypes(element, RESOURCE_FIELDS);
-  }
-
-  private void verifyResourcesContainDefaultResourceTypes(Element queue,
-      Set<String> resourceCategories) {
-    for (String resourceCategory : resourceCategories) {
-      boolean hasResourceCategory = hasChild(queue, resourceCategory);
-      assertTrue("Queue " + queue + " does not have resource category key: "
-          + resourceCategory, hasResourceCategory);
-      verifyResourceContainsDefaultResourceTypes(
-              (Element) queue.getElementsByTagName(resourceCategory).item(0));
-    }
-  }
-
-  private void verifyResourceContainsDefaultResourceTypes(
-      Element element) {
-    Object memory = opt(element, "memory");
-    Object vCores = opt(element, "vCores");
-
-    assertNotNull("Key 'memory' not found in: " + element, memory);
-    assertNotNull("Key 'vCores' not found in: " + element, vCores);
-  }
-
-  private void verifyResourcesContainCustomResourceTypes(Element queue,
-      Set<String> resourceCategories) {
-    for (String resourceCategory : resourceCategories) {
-      assertTrue("Queue " + queue + " does not have key for resourceCategory: "
-          + resourceCategory, hasChild(queue, resourceCategory));
-      verifyResourceContainsCustomResourceTypes(
-              (Element) queue.getElementsByTagName(resourceCategory).item(0));
-    }
-  }
-
-  private void verifyResourceContainsCustomResourceTypes(
-      Element resourceCategory) {
-    assertEquals(
-        toXml(resourceCategory)
-            + " should have only one resourceInformations child!",
-        1, resourceCategory.getElementsByTagName("resourceInformations")
-            .getLength());
-    Element resourceInformations = (Element) resourceCategory
-        .getElementsByTagName("resourceInformations").item(0);
-
-    NodeList customResources =
-        resourceInformations.getElementsByTagName("resourceInformation");
-
-    // customResources will include vcores / memory as well
-    assertEquals(
-        "Different number of custom resource types found than expected",
-        customResourceTypes.size(), customResources.getLength() - 2);
-
-    for (int i = 0; i < customResources.getLength(); i++) {
-      Element customResource = (Element) customResources.item(i);
-      String name = getXmlString(customResource, "name");
-      String unit = getXmlString(customResource, "units");
-      String resourceType = getXmlString(customResource, "resourceType");
-      Long value = getXmlLong(customResource, "value");
-
-      if (ResourceInformation.MEMORY_URI.equals(name)
-          || ResourceInformation.VCORES_URI.equals(name)) {
-        continue;
-      }
-
-      assertTrue("Custom resource type " + name + " not found",
-          customResourceTypes.contains(name));
-      assertEquals("k", unit);
-      assertEquals(ResourceTypes.COUNTABLE,
-          ResourceTypes.valueOf(resourceType));
-      assertNotNull("Resource value should not be null for resource type "
-          + resourceType + ", listing xml contents: " + toXml(customResource),
-          value);
-    }
-  }
-
-  private Object opt(Node node, String child) {
-    NodeList nodes = getElementsByTagNameInternal(node, child);
-    if (nodes.getLength() > 0) {
-      return nodes.item(0);
-    }
-
-    return null;
-  }
-
-  private boolean hasChild(Node node, String child) {
-    return getElementsByTagNameInternal(node, child).getLength() > 0;
-  }
-
-  private NodeList getElementsByTagNameInternal(Node node, String child) {
-    if (node instanceof Element) {
-      return ((Element) node).getElementsByTagName(child);
-    } else if (node instanceof Document) {
-      return ((Document) node).getElementsByTagName(child);
-    } else {
-      throw new IllegalStateException("Unknown type of wrappedObject: " + node
-          + ", type: " + node.getClass());
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
deleted file mode 100644
index de4d5a1..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
-
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.WebAppDescriptor;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.*;
-import org.apache.hadoop.yarn.util.resource.ResourceUtils;
-import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
-import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
-import org.apache.hadoop.yarn.webapp.JerseyTestBase;
-import org.codehaus.jettison.json.JSONArray;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.w3c.dom.Element;
-import javax.ws.rs.core.MediaType;
-import java.lang.reflect.Method;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * This class is to test response representations of queue resources,
- * explicitly setting custom resource types. with the help of
- * {@link CustomResourceTypesConfigurationProvider}
- */
-public class TestRMWebServicesFairSchedulerCustomResourceTypes
-    extends JerseyTestBase {
-  private static MockRM rm;
-  private static YarnConfiguration conf;
-
-  private static class WebServletModule extends ServletModule {
-    @Override
-    protected void configureServlets() {
-      bind(JAXBContextResolver.class);
-      bind(RMWebServices.class);
-      bind(GenericExceptionHandler.class);
-      conf = new YarnConfiguration();
-      conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
-          ResourceScheduler.class);
-      initResourceTypes(conf);
-      rm = new MockRM(conf);
-      bind(ResourceManager.class).toInstance(rm);
-      serve("/*").with(GuiceContainer.class);
-    }
-
-    private void initResourceTypes(YarnConfiguration conf) {
-      conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
-          CustomResourceTypesConfigurationProvider.class.getName());
-      ResourceUtils.resetResourceTypes(conf);
-    }
-  }
-
-  @Before
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    createInjectorForWebServletModule();
-  }
-
-  @After
-  public void tearDown() {
-    ResourceUtils.resetResourceTypes(new Configuration());
-  }
-
-  private void createInjectorForWebServletModule() {
-    GuiceServletConfig
-        .setInjector(Guice.createInjector(new WebServletModule()));
-  }
-
-  @After
-  public void teardown() {
-    CustomResourceTypesConfigurationProvider.reset();
-  }
-
-  public TestRMWebServicesFairSchedulerCustomResourceTypes() {
-    super(new WebAppDescriptor.Builder(
-        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
-            .contextListenerClass(GuiceServletConfig.class)
-            .filterClass(com.google.inject.servlet.GuiceFilter.class)
-            .contextPath("jersey-guice-filter").servletPath("/").build());
-  }
-
-  @Test
-  public void testClusterSchedulerWithCustomResourceTypesJson() {
-    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
-    QueueManager queueManager = scheduler.getQueueManager();
-    // create LeafQueues
-    queueManager.getLeafQueue("root.q.subqueue1", true);
-    queueManager.getLeafQueue("root.q.subqueue2", true);
-
-    FSLeafQueue subqueue1 =
-        queueManager.getLeafQueue("root.q.subqueue1", false);
-    incrementUsedResourcesOnQueue(subqueue1, 33L);
-
-    WebResource path =
-        resource().path("ws").path("v1").path("cluster").path("scheduler");
-    ClientResponse response =
-        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
-
-    verifyJsonResponse(path, response,
-            CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
-  }
-
-  @Test
-  public void testClusterSchedulerWithCustomResourceTypesXml() {
-    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
-    QueueManager queueManager = scheduler.getQueueManager();
-    // create LeafQueues
-    queueManager.getLeafQueue("root.q.subqueue1", true);
-    queueManager.getLeafQueue("root.q.subqueue2", true);
-
-    FSLeafQueue subqueue1 =
-        queueManager.getLeafQueue("root.q.subqueue1", false);
-    incrementUsedResourcesOnQueue(subqueue1, 33L);
-
-    WebResource path =
-        resource().path("ws").path("v1").path("cluster").path("scheduler");
-    ClientResponse response =
-        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
-
-    verifyXmlResponse(path, response,
-        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
-  }
-
-  @Test
-  public void testClusterSchedulerWithElevenCustomResourceTypesXml() {
-    CustomResourceTypesConfigurationProvider.setNumberOfResourceTypes(11);
-    createInjectorForWebServletModule();
-
-    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
-    QueueManager queueManager = scheduler.getQueueManager();
-    // create LeafQueues
-    queueManager.getLeafQueue("root.q.subqueue1", true);
-    queueManager.getLeafQueue("root.q.subqueue2", true);
-
-    FSLeafQueue subqueue1 =
-        queueManager.getLeafQueue("root.q.subqueue1", false);
-    incrementUsedResourcesOnQueue(subqueue1, 33L);
-
-    WebResource path =
-        resource().path("ws").path("v1").path("cluster").path("scheduler");
-    ClientResponse response =
-        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
-
-    verifyXmlResponse(path, response,
-        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
-  }
-
-  @Test
-  public void testClusterSchedulerElevenWithCustomResourceTypesJson() {
-    CustomResourceTypesConfigurationProvider.setNumberOfResourceTypes(11);
-    createInjectorForWebServletModule();
-
-    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
-    QueueManager queueManager = scheduler.getQueueManager();
-    // create LeafQueues
-    queueManager.getLeafQueue("root.q.subqueue1", true);
-    queueManager.getLeafQueue("root.q.subqueue2", true);
-
-    FSLeafQueue subqueue1 =
-        queueManager.getLeafQueue("root.q.subqueue1", false);
-    incrementUsedResourcesOnQueue(subqueue1, 33L);
-
-    WebResource path =
-        resource().path("ws").path("v1").path("cluster").path("scheduler");
-    ClientResponse response =
-        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
-
-    verifyJsonResponse(path, response,
-        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
-  }
-
-  private void verifyJsonResponse(WebResource path, ClientResponse response,
-      List<String> customResourceTypes) {
-    JsonCustomResourceTypeTestcase testCase =
-        new JsonCustomResourceTypeTestcase(path,
-            new BufferedClientResponse(response));
-    testCase.verify(json -> {
-      try {
-        JSONArray queues = json.getJSONObject("scheduler")
-            .getJSONObject("schedulerInfo").getJSONObject("rootQueue")
-            .getJSONObject("childQueues").getJSONArray("queue");
-
-        // childQueueInfo consists of subqueue1 and subqueue2 info
-        assertEquals(2, queues.length());
-        JSONObject firstChildQueue = queues.getJSONObject(0);
-        new FairSchedulerJsonVerifications(customResourceTypes)
-            .verify(firstChildQueue);
-      } catch (JSONException e) {
-        throw new RuntimeException(e);
-      }
-    });
-  }
-
-  private void verifyXmlResponse(WebResource path, ClientResponse response,
-          List<String> customResourceTypes) {
-    XmlCustomResourceTypeTestCase testCase = new XmlCustomResourceTypeTestCase(
-        path, new BufferedClientResponse(response));
-
-    testCase.verify(xml -> {
-      Element scheduler =
-          (Element) xml.getElementsByTagName("scheduler").item(0);
-      Element schedulerInfo =
-          (Element) scheduler.getElementsByTagName("schedulerInfo").item(0);
-      Element rootQueue =
-          (Element) schedulerInfo.getElementsByTagName("rootQueue").item(0);
-
-      Element childQueues =
-          (Element) rootQueue.getElementsByTagName("childQueues").item(0);
-      Element queue =
-          (Element) childQueues.getElementsByTagName("queue").item(0);
-      new FairSchedulerXmlVerifications(customResourceTypes).verify(queue);
-    });
-  }
-
-  private void incrementUsedResourcesOnQueue(final FSLeafQueue queue,
-      final long value) {
-    try {
-      Method incUsedResourceMethod = queue.getClass().getSuperclass()
-          .getDeclaredMethod("incUsedResource", Resource.class);
-      incUsedResourceMethod.setAccessible(true);
-
-      Map<String, Long> customResources =
-          CustomResourceTypesConfigurationProvider.getCustomResourceTypes()
-              .stream()
-              .collect(Collectors.toMap(Function.identity(), v -> value));
-
-      incUsedResourceMethod.invoke(queue,
-          Resource.newInstance(20, 30, customResources));
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
deleted file mode 100644
index 4ab1443..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
-
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringEqual;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringMatch;
-import static org.junit.Assert.*;
-
-/**
- * Contains all value verifications that are needed to verify {@link AppInfo}
- * JSON objects.
- */
-public final class AppInfoJsonVerifications {
-
-  private AppInfoJsonVerifications() {
-    //utility class
-  }
-
-  /**
-   * Tests whether {@link AppInfo} representation object contains the required
-   * values as per defined in the specified app parameter.
-   * @param  app  an RMApp instance that contains the required values
-   *              to test against.
-   */
-  public static void verify(JSONObject info, RMApp app) throws JSONException {
-    checkStringMatch("id", app.getApplicationId().toString(),
-        info.getString("id"));
-    checkStringMatch("user", app.getUser(), info.getString("user"));
-    checkStringMatch("name", app.getName(), info.getString("name"));
-    checkStringMatch("applicationType", app.getApplicationType(),
-        info.getString("applicationType"));
-    checkStringMatch("queue", app.getQueue(), info.getString("queue"));
-    assertEquals("priority doesn't match", 0, info.getInt("priority"));
-    checkStringMatch("state", app.getState().toString(),
-        info.getString("state"));
-    checkStringMatch("finalStatus", app.getFinalApplicationStatus().toString(),
-        info.getString("finalStatus"));
-    assertEquals("progress doesn't match", 0,
-        (float) info.getDouble("progress"), 0.0);
-    if ("UNASSIGNED".equals(info.getString("trackingUI"))) {
-      checkStringMatch("trackingUI", "UNASSIGNED",
-          info.getString("trackingUI"));
-    }
-    checkStringEqual("diagnostics", app.getDiagnostics().toString(),
-        info.getString("diagnostics"));
-    assertEquals("clusterId doesn't match",
-        ResourceManager.getClusterTimeStamp(), info.getLong("clusterId"));
-    assertEquals("startedTime doesn't match", app.getStartTime(),
-        info.getLong("startedTime"));
-    assertEquals("finishedTime doesn't match", app.getFinishTime(),
-        info.getLong("finishedTime"));
-    assertTrue("elapsed time not greater than 0",
-        info.getLong("elapsedTime") > 0);
-    checkStringMatch("amHostHttpAddress",
-        app.getCurrentAppAttempt().getMasterContainer().getNodeHttpAddress(),
-        info.getString("amHostHttpAddress"));
-    assertTrue("amContainerLogs doesn't match",
-        info.getString("amContainerLogs").startsWith("http://"));
-    assertTrue("amContainerLogs doesn't contain user info",
-        info.getString("amContainerLogs").endsWith("/" + app.getUser()));
-    assertEquals("allocatedMB doesn't match", 1024, info.getInt("allocatedMB"));
-    assertEquals("allocatedVCores doesn't match", 1,
-        info.getInt("allocatedVCores"));
-    assertEquals("queueUsagePerc doesn't match", 50.0f,
-        (float) info.getDouble("queueUsagePercentage"), 0.01f);
-    assertEquals("clusterUsagePerc doesn't match", 50.0f,
-        (float) info.getDouble("clusterUsagePercentage"), 0.01f);
-    assertEquals("numContainers doesn't match", 1,
-        info.getInt("runningContainers"));
-    assertNotNull("preemptedResourceSecondsMap should not be null",
-        info.getJSONObject("preemptedResourceSecondsMap"));
-    assertEquals("preemptedResourceMB doesn't match",
-        app.getRMAppMetrics().getResourcePreempted().getMemorySize(),
-        info.getInt("preemptedResourceMB"));
-    assertEquals("preemptedResourceVCores doesn't match",
-        app.getRMAppMetrics().getResourcePreempted().getVirtualCores(),
-        info.getInt("preemptedResourceVCores"));
-    assertEquals("numNonAMContainerPreempted doesn't match",
-        app.getRMAppMetrics().getNumNonAMContainersPreempted(),
-        info.getInt("numNonAMContainerPreempted"));
-    assertEquals("numAMContainerPreempted doesn't match",
-        app.getRMAppMetrics().getNumAMContainersPreempted(),
-        info.getInt("numAMContainerPreempted"));
-    assertEquals("Log aggregation Status doesn't match",
-        app.getLogAggregationStatusForAppReport().toString(),
-        info.getString("logAggregationStatus"));
-    assertEquals("unmanagedApplication doesn't match",
-        app.getApplicationSubmissionContext().getUnmanagedAM(),
-        info.getBoolean("unmanagedApplication"));
-
-    if (app.getApplicationSubmissionContext()
-        .getNodeLabelExpression() != null) {
-      assertEquals("appNodeLabelExpression doesn't match",
-          app.getApplicationSubmissionContext().getNodeLabelExpression(),
-          info.getString("appNodeLabelExpression"));
-    }
-    assertEquals("amNodeLabelExpression doesn't match",
-        app.getAMResourceRequests().get(0).getNodeLabelExpression(),
-        info.getString("amNodeLabelExpression"));
-    assertEquals("amRPCAddress",
-        AppInfo.getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()),
-        info.getString("amRPCAddress"));
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
deleted file mode 100644
index 7c5b6db..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
-
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
-import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
-import org.w3c.dom.Element;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringMatch;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlBoolean;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlFloat;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlInt;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Contains all value verifications that are needed to verify {@link AppInfo}
- * XML documents.
- */
-public final class AppInfoXmlVerifications {
-
-  private AppInfoXmlVerifications() {
-    //utility class
-  }
-
-  /**
-   * Tests whether {@link AppInfo} representation object contains the required
-   * values as per defined in the specified app parameter.
-   * @param info
-   * @param  app  an RMApp instance that contains the required values
-   */
-  public static void verify(Element info, RMApp app) {
-    checkStringMatch("id", app.getApplicationId()
-            .toString(), getXmlString(info, "id"));
-    checkStringMatch("user", app.getUser(),
-            getXmlString(info, "user"));
-    checkStringMatch("name", app.getName(),
-            getXmlString(info, "name"));
-    checkStringMatch("applicationType",
-            app.getApplicationType(), getXmlString(info, "applicationType"));
-    checkStringMatch("queue", app.getQueue(),
-            getXmlString(info, "queue"));
-    assertEquals("priority doesn't match", 0, getXmlInt(info, "priority"));
-    checkStringMatch("state", app.getState().toString(),
-            getXmlString(info, "state"));
-    checkStringMatch("finalStatus", app
-            .getFinalApplicationStatus().toString(),
-            getXmlString(info, "finalStatus"));
-    assertEquals("progress doesn't match", 0, getXmlFloat(info, "progress"),
-        0.0);
-    if ("UNASSIGNED".equals(getXmlString(info, "trackingUI"))) {
-      checkStringMatch("trackingUI", "UNASSIGNED",
-              getXmlString(info, "trackingUI"));
-    }
-    WebServicesTestUtils.checkStringEqual("diagnostics",
-            app.getDiagnostics().toString(), getXmlString(info, "diagnostics"));
-    assertEquals("clusterId doesn't match",
-            ResourceManager.getClusterTimeStamp(),
-            getXmlLong(info, "clusterId"));
-    assertEquals("startedTime doesn't match", app.getStartTime(),
-            getXmlLong(info, "startedTime"));
-    assertEquals("finishedTime doesn't match", app.getFinishTime(),
-            getXmlLong(info, "finishedTime"));
-    assertTrue("elapsed time not greater than 0",
-            getXmlLong(info, "elapsedTime") > 0);
-    checkStringMatch("amHostHttpAddress", app
-                    .getCurrentAppAttempt().getMasterContainer()
-                    .getNodeHttpAddress(),
-            getXmlString(info, "amHostHttpAddress"));
-    assertTrue("amContainerLogs doesn't match",
-        getXmlString(info, "amContainerLogs").startsWith("http://"));
-    assertTrue("amContainerLogs doesn't contain user info",
-        getXmlString(info, "amContainerLogs").endsWith("/" + app.getUser()));
-    assertEquals("allocatedMB doesn't match", 1024,
-            getXmlInt(info, "allocatedMB"));
-    assertEquals("allocatedVCores doesn't match", 1,
-            getXmlInt(info, "allocatedVCores"));
-    assertEquals("queueUsagePerc doesn't match", 50.0f,
-            getXmlFloat(info, "queueUsagePercentage"), 0.01f);
-    assertEquals("clusterUsagePerc doesn't match", 50.0f,
-            getXmlFloat(info, "clusterUsagePercentage"), 0.01f);
-    assertEquals("numContainers doesn't match", 1,
-        getXmlInt(info, "runningContainers"));
-    assertNotNull("preemptedResourceSecondsMap should not be null",
-            info.getElementsByTagName("preemptedResourceSecondsMap"));
-    assertEquals("preemptedResourceMB doesn't match", app
-                    .getRMAppMetrics().getResourcePreempted().getMemorySize(),
-            getXmlInt(info, "preemptedResourceMB"));
-    assertEquals("preemptedResourceVCores doesn't match", app
-                    .getRMAppMetrics().getResourcePreempted().getVirtualCores(),
-            getXmlInt(info, "preemptedResourceVCores"));
-    assertEquals("numNonAMContainerPreempted doesn't match", app
-                    .getRMAppMetrics().getNumNonAMContainersPreempted(),
-            getXmlInt(info, "numNonAMContainerPreempted"));
-    assertEquals("numAMContainerPreempted doesn't match", app
-                    .getRMAppMetrics().getNumAMContainersPreempted(),
-            getXmlInt(info, "numAMContainerPreempted"));
-    assertEquals("Log aggregation Status doesn't match", app
-                    .getLogAggregationStatusForAppReport().toString(),
-            getXmlString(info, "logAggregationStatus"));
-    assertEquals("unmanagedApplication doesn't match", app
-                    .getApplicationSubmissionContext().getUnmanagedAM(),
-            getXmlBoolean(info, "unmanagedApplication"));
-    assertEquals("unmanagedApplication doesn't match",
-            app.getApplicationSubmissionContext().getNodeLabelExpression(),
-            getXmlString(info, "appNodeLabelExpression"));
-    assertEquals("unmanagedApplication doesn't match",
-            app.getAMResourceRequests().get(0).getNodeLabelExpression(),
-            getXmlString(info, "amNodeLabelExpression"));
-    assertEquals("amRPCAddress",
-            AppInfo.getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()),
-            getXmlString(info, "amRPCAddress"));
-  }
-}
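
For context, the helper removed above is a static verifier: a web-services test extracts the app element from the RM XML response and hands it over together with the RMApp it was built from. A minimal sketch of that call, assuming a parsed org.w3c.dom.Document named dom and an RMApp named rmApp (both names are illustrative):

    Element appElement = (Element) dom.getElementsByTagName("app").item(0);
    AppInfoXmlVerifications.verify(appElement, rmApp);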

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
deleted file mode 100644
index a8990ca..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
-
-
-import com.sun.jersey.api.client.ClientHandlerException;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.UniformInterfaceException;
-
-import javax.ws.rs.core.MediaType;
-import java.io.IOException;
-
-/**
- * This class is merely a wrapper for {@link ClientResponse}. Given that the
- * entity input stream of {@link ClientResponse} can be read only once by
- * default and for some tests it is convenient to read the input stream many
- * times, this class hides the details of how to do that and prevents
- * unnecessary code duplication in tests.
- */
-public class BufferedClientResponse {
-  private ClientResponse response;
-
-  public BufferedClientResponse(ClientResponse response) {
-    response.bufferEntity();
-    this.response = response;
-  }
-
-  public <T> T getEntity(Class<T> clazz)
-          throws ClientHandlerException, UniformInterfaceException {
-    try {
-      response.getEntityInputStream().reset();
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-    return response.getEntity(clazz);
-  }
-
-  public MediaType getType() {
-    return response.getType();
-  }
-}
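
As its Javadoc notes, Jersey's ClientResponse entity stream can normally be read only once; the wrapper above buffers the entity so a test can read the same response several times. A hedged usage sketch, assuming a Jersey 1.x WebResource named r that points at an RM REST endpoint (the resource and media type are illustrative):

    ClientResponse raw = r.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    BufferedClientResponse response = new BufferedClientResponse(raw);

    // The buffered entity can now be consumed more than once.
    String body = response.getEntity(String.class);
    JSONObject json = response.getEntity(JSONObject.class);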

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
deleted file mode 100644
index 9d6a111..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
-
-import com.sun.jersey.api.client.WebResource;
-import org.apache.hadoop.http.JettyUtils;
-import org.codehaus.jettison.json.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.ws.rs.core.MediaType;
-
-import java.util.function.Consumer;
-
-import static org.junit.Assert.*;
-
-/**
- * This class hides the implementation details of how to verify the structure of
- * JSON responses. Tests should only provide the path of the
- * {@link WebResource}, the response from the resource and
- * the verifier Consumer to
- * {@link JsonCustomResourceTypeTestcase#verify(Consumer)}. An instance of
- * {@link JSONObject} will be passed to that consumer to be able to
- * verify the response.
- */
-public class JsonCustomResourceTypeTestcase {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(JsonCustomResourceTypeTestcase.class);
-
-  private final WebResource path;
-  private final BufferedClientResponse response;
-  private final JSONObject parsedResponse;
-
-  public JsonCustomResourceTypeTestcase(WebResource path,
-                                        BufferedClientResponse response) {
-    this.path = path;
-    this.response = response;
-    this.parsedResponse = response.getEntity(JSONObject.class);
-  }
-
-  public void verify(Consumer<JSONObject> verifier) {
-    assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
-        response.getType().toString());
-
-    logResponse();
-
-    String responseStr = response.getEntity(String.class);
-    if (responseStr == null || responseStr.isEmpty()) {
-      throw new IllegalStateException("Response is null or empty!");
-    }
-    verifier.accept(parsedResponse);
-  }
-
-  private void logResponse() {
-    String responseStr = response.getEntity(String.class);
-    LOG.info("Raw response from service URL {}: {}", path.toString(),
-        responseStr);
-    LOG.info("Parsed response from service URL {}: {}", path.toString(),
-        parsedResponse);
-  }
-}
\ No newline at end of file
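
The class comment above spells out the intended pattern: the test supplies the WebResource path, the buffered response, and a Consumer holding the JSON assertions. A hedged sketch of that pattern (the path and the asserted field are illustrative):

    WebResource path = resource().path("ws").path("v1").path("cluster").path("apps");
    BufferedClientResponse response = new BufferedClientResponse(
        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class));

    new JsonCustomResourceTypeTestcase(path, response).verify(json -> {
      // Custom-resource assertions against the parsed JSONObject go here.
      assertTrue("response should contain apps", json.has("apps"));
    });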

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
deleted file mode 100644
index 6e58a89..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.codehaus.jettison.json.JSONArray;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-
-import java.util.List;
-import java.util.Map;
-
-import static junit.framework.TestCase.assertTrue;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-/**
- * Performs value verifications on
- * {@link org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceRequestInfo}
- * objects against the values of {@link ResourceRequest}. With the help of the
- * {@link Builder}, users can also make verifications of the custom resource
- * types and its values.
- */
-public class ResourceRequestsJsonVerifications {
-  private final ResourceRequest resourceRequest;
-  private final JSONObject requestInfo;
-  private final Map<String, Long> customResourceTypes;
-  private final List<String> expectedCustomResourceTypes;
-
-  ResourceRequestsJsonVerifications(Builder builder) {
-    this.resourceRequest = builder.resourceRequest;
-    this.requestInfo = builder.requestInfo;
-    this.customResourceTypes = builder.customResourceTypes;
-    this.expectedCustomResourceTypes = builder.expectedCustomResourceTypes;
-  }
-
-  public static void verify(JSONObject requestInfo, ResourceRequest rr)
-      throws JSONException {
-    createDefaultBuilder(requestInfo, rr).build().verify();
-  }
-
-  public static void verifyWithCustomResourceTypes(JSONObject requestInfo,
-      ResourceRequest resourceRequest, List<String> expectedResourceTypes)
-      throws JSONException {
-
-    createDefaultBuilder(requestInfo, resourceRequest)
-        .withExpectedCustomResourceTypes(expectedResourceTypes)
-        .withCustomResourceTypes(
-            extractActualCustomResourceTypes(requestInfo, expectedResourceTypes))
-        .build().verify();
-  }
-
-  private static Builder createDefaultBuilder(JSONObject requestInfo,
-      ResourceRequest resourceRequest) {
-    return new ResourceRequestsJsonVerifications.Builder()
-            .withRequest(resourceRequest)
-            .withRequestInfoJson(requestInfo);
-  }
-
-  private static Map<String, Long> extractActualCustomResourceTypes(
-      JSONObject requestInfo, List<String> expectedResourceTypes)
-      throws JSONException {
-    JSONObject capability = requestInfo.getJSONObject("capability");
-    Map<String, Long> resourceAndValue =
-        extractCustomResorceTypeValues(capability, expectedResourceTypes);
-    Map.Entry<String, Long> resourceEntry =
-        resourceAndValue.entrySet().iterator().next();
-
-    assertTrue(
-        "Found resource type: " + resourceEntry.getKey()
-            + " is not in expected resource types: " + expectedResourceTypes,
-        expectedResourceTypes.contains(resourceEntry.getKey()));
-
-    return resourceAndValue;
-  }
-
-  private static Map<String, Long> extractCustomResorceTypeValues(
-      JSONObject capability, List<String> expectedResourceTypes)
-      throws JSONException {
-    assertTrue(
-        "resourceCategory does not have resourceInformations: " + capability,
-        capability.has("resourceInformations"));
-
-    JSONObject resourceInformations =
-        capability.getJSONObject("resourceInformations");
-    assertTrue(
-        "resourceInformations does not have resourceInformation object: "
-            + resourceInformations,
-        resourceInformations.has("resourceInformation"));
-    JSONArray customResources =
-        resourceInformations.getJSONArray("resourceInformation");
-
-    // customResources will include vcores / memory as well
-    assertEquals(
-        "Different number of custom resource types found than expected",
-        expectedResourceTypes.size(), customResources.length() - 2);
-
-    Map<String, Long> resourceValues = Maps.newHashMap();
-    for (int i = 0; i < customResources.length(); i++) {
-      JSONObject customResource = customResources.getJSONObject(i);
-      assertTrue("Resource type does not have name field: " + customResource,
-          customResource.has("name"));
-      assertTrue("Resource type does not have name resourceType field: "
-          + customResource, customResource.has("resourceType"));
-      assertTrue(
-          "Resource type does not have name units field: " + customResource,
-          customResource.has("units"));
-      assertTrue(
-          "Resource type does not have name value field: " + customResource,
-          customResource.has("value"));
-
-      String name = customResource.getString("name");
-      String unit = customResource.getString("units");
-      String resourceType = customResource.getString("resourceType");
-      Long value = customResource.getLong("value");
-
-      if (ResourceInformation.MEMORY_URI.equals(name)
-          || ResourceInformation.VCORES_URI.equals(name)) {
-        continue;
-      }
-
-      assertTrue("Custom resource type " + name + " not found",
-          expectedResourceTypes.contains(name));
-      assertEquals("k", unit);
-      assertEquals(ResourceTypes.COUNTABLE,
-          ResourceTypes.valueOf(resourceType));
-      assertNotNull("Custom resource value " + value + " is null!", value);
-      resourceValues.put(name, value);
-    }
-
-    return resourceValues;
-  }
-
-  private void verify() throws JSONException {
-    assertEquals("nodeLabelExpression doesn't match",
-        resourceRequest.getNodeLabelExpression(),
-            requestInfo.getString("nodeLabelExpression"));
-    assertEquals("numContainers doesn't match",
-            resourceRequest.getNumContainers(),
-            requestInfo.getInt("numContainers"));
-    assertEquals("relaxLocality doesn't match",
-            resourceRequest.getRelaxLocality(),
-            requestInfo.getBoolean("relaxLocality"));
-    assertEquals("priority does not match",
-            resourceRequest.getPriority().getPriority(),
-            requestInfo.getInt("priority"));
-    assertEquals("resourceName does not match",
-            resourceRequest.getResourceName(),
-            requestInfo.getString("resourceName"));
-    assertEquals("memory does not match",
-        resourceRequest.getCapability().getMemorySize(),
-            requestInfo.getJSONObject("capability").getLong("memory"));
-    assertEquals("vCores does not match",
-        resourceRequest.getCapability().getVirtualCores(),
-            requestInfo.getJSONObject("capability").getLong("vCores"));
-
-    verifyAtLeastOneCustomResourceIsSerialized();
-
-    JSONObject executionTypeRequest =
-            requestInfo.getJSONObject("executionTypeRequest");
-    assertEquals("executionType does not match",
-        resourceRequest.getExecutionTypeRequest().getExecutionType().name(),
-            executionTypeRequest.getString("executionType"));
-    assertEquals("enforceExecutionType does not match",
-            resourceRequest.getExecutionTypeRequest().getEnforceExecutionType(),
-            executionTypeRequest.getBoolean("enforceExecutionType"));
-  }
-
-  /**
-   * JSON serialization produces "invalid JSON" by default as maps are
-   * serialized like this:
-   * "customResources":{"entry":{"key":"customResource-1","value":"0"}}
-   * If the map has multiple keys then multiple entries will be serialized.
-   * Our json parser in tests cannot handle duplicates therefore only one
-   * custom resource will be in the parsed json. See:
-   * https://issues.apache.org/jira/browse/YARN-7505
-   */
-  private void verifyAtLeastOneCustomResourceIsSerialized() {
-    boolean resourceFound = false;
-    for (String expectedCustomResourceType : expectedCustomResourceTypes) {
-      if (customResourceTypes.containsKey(expectedCustomResourceType)) {
-        resourceFound = true;
-        Long resourceValue =
-            customResourceTypes.get(expectedCustomResourceType);
-        assertNotNull("Resource value should not be null!", resourceValue);
-      }
-    }
-    assertTrue("No custom resource type can be found in the response!",
-        resourceFound);
-  }
-
-  /**
-   * Builder class for {@link ResourceRequestsJsonVerifications}.
-   */
-  public static final class Builder {
-    private List<String> expectedCustomResourceTypes = Lists.newArrayList();
-    private Map<String, Long> customResourceTypes;
-    private ResourceRequest resourceRequest;
-    private JSONObject requestInfo;
-
-    Builder() {
-    }
-
-    public static Builder create() {
-      return new Builder();
-    }
-
-    Builder withExpectedCustomResourceTypes(
-            List<String> expectedCustomResourceTypes) {
-      this.expectedCustomResourceTypes = expectedCustomResourceTypes;
-      return this;
-    }
-
-    Builder withCustomResourceTypes(
-            Map<String, Long> customResourceTypes) {
-      this.customResourceTypes = customResourceTypes;
-      return this;
-    }
-
-    Builder withRequest(ResourceRequest resourceRequest) {
-      this.resourceRequest = resourceRequest;
-      return this;
-    }
-
-    Builder withRequestInfoJson(JSONObject requestInfo) {
-      this.requestInfo = requestInfo;
-      return this;
-    }
-
-    public ResourceRequestsJsonVerifications build() {
-      return new ResourceRequestsJsonVerifications(this);
-    }
-  }
-}
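
Per its class comment, the verifier compares a single resource-request JSON object against the originating ResourceRequest; the custom-resource variant additionally checks the configured resource types. A hedged sketch (the JSON object, request, and resource-type name are illustrative):

    // "requestInfo" is assumed to be the JSON object of one resource request taken
    // from the RM web-services response; "request" is the matching ResourceRequest.
    ResourceRequestsJsonVerifications.verify(requestInfo, request);
    ResourceRequestsJsonVerifications.verifyWithCustomResourceTypes(
        requestInfo, request, Lists.newArrayList("customResource-1"));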

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
deleted file mode 100644
index af9b0f3..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.w3c.dom.Element;
-import org.w3c.dom.NodeList;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import static junit.framework.TestCase.assertTrue;
-import static org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase.toXml;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlBoolean;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlInt;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-/**
- * Performs value verifications on
- * {@link org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceRequestInfo}
- * objects against the values of {@link ResourceRequest}. With the help of the
- * {@link Builder}, users can also make verifications of the custom resource
- * types and its values.
- */
-public class ResourceRequestsXmlVerifications {
-  private final ResourceRequest resourceRequest;
-  private final Element requestInfo;
-  private final Map<String, Long> customResourceTypes;
-  private final List<String> expectedCustomResourceTypes;
-
-  ResourceRequestsXmlVerifications(Builder builder) {
-    this.resourceRequest = builder.resourceRequest;
-    this.requestInfo = builder.requestInfo;
-    this.customResourceTypes = builder.customResourceTypes;
-    this.expectedCustomResourceTypes = builder.expectedCustomResourceTypes;
-  }
-
-  public static void verifyWithCustomResourceTypes(Element requestInfo,
-      ResourceRequest resourceRequest, List<String> expectedResourceTypes) {
-
-    createDefaultBuilder(requestInfo, resourceRequest)
-        .withExpectedCustomResourceTypes(expectedResourceTypes)
-        .withCustomResourceTypes(extractActualCustomResourceType(requestInfo,
-            expectedResourceTypes))
-        .build().verify();
-  }
-
-  private static Builder createDefaultBuilder(Element requestInfo,
-      ResourceRequest resourceRequest) {
-    return new ResourceRequestsXmlVerifications.Builder()
-        .withRequest(resourceRequest).withRequestInfo(requestInfo);
-  }
-
-  private static Map<String, Long> extractActualCustomResourceType(
-      Element requestInfo, List<String> expectedResourceTypes) {
-    Element capability =
-        (Element) requestInfo.getElementsByTagName("capability").item(0);
-
-    return extractCustomResorceTypes(capability,
-        Sets.newHashSet(expectedResourceTypes));
-  }
-
-  private static Map<String, Long> extractCustomResorceTypes(Element capability,
-      Set<String> expectedResourceTypes) {
-    assertEquals(
-        toXml(capability) + " should have only one resourceInformations child!",
-        1, capability.getElementsByTagName("resourceInformations").getLength());
-    Element resourceInformations = (Element) capability
-        .getElementsByTagName("resourceInformations").item(0);
-
-    NodeList customResources =
-        resourceInformations.getElementsByTagName("resourceInformation");
-
-    // customResources will include vcores / memory as well
-    assertEquals(
-        "Different number of custom resource types found than expected",
-        expectedResourceTypes.size(), customResources.getLength() - 2);
-
-    Map<String, Long> resourceTypesAndValues = Maps.newHashMap();
-    for (int i = 0; i < customResources.getLength(); i++) {
-      Element customResource = (Element) customResources.item(i);
-      String name = getXmlString(customResource, "name");
-      String unit = getXmlString(customResource, "units");
-      String resourceType = getXmlString(customResource, "resourceType");
-      Long value = getXmlLong(customResource, "value");
-
-      if (ResourceInformation.MEMORY_URI.equals(name)
-          || ResourceInformation.VCORES_URI.equals(name)) {
-        continue;
-      }
-
-      assertTrue("Custom resource type " + name + " not found",
-          expectedResourceTypes.contains(name));
-      assertEquals("k", unit);
-      assertEquals(ResourceTypes.COUNTABLE,
-          ResourceTypes.valueOf(resourceType));
-      assertNotNull("Resource value should not be null for resource type "
-          + resourceType + ", listing xml contents: " + toXml(customResource),
-          value);
-      resourceTypesAndValues.put(name, value);
-    }
-
-    return resourceTypesAndValues;
-  }
-
-  private void verify() {
-    assertEquals("nodeLabelExpression doesn't match",
-        resourceRequest.getNodeLabelExpression(),
-        getXmlString(requestInfo, "nodeLabelExpression"));
-    assertEquals("numContainers doesn't match",
-        resourceRequest.getNumContainers(),
-        getXmlInt(requestInfo, "numContainers"));
-    assertEquals("relaxLocality doesn't match",
-        resourceRequest.getRelaxLocality(),
-        getXmlBoolean(requestInfo, "relaxLocality"));
-    assertEquals("priority does not match",
-        resourceRequest.getPriority().getPriority(),
-        getXmlInt(requestInfo, "priority"));
-    assertEquals("resourceName does not match",
-        resourceRequest.getResourceName(),
-        getXmlString(requestInfo, "resourceName"));
-    Element capability = (Element) requestInfo
-            .getElementsByTagName("capability").item(0);
-    assertEquals("memory does not match",
-        resourceRequest.getCapability().getMemorySize(),
-        getXmlLong(capability, "memory"));
-    assertEquals("vCores does not match",
-        resourceRequest.getCapability().getVirtualCores(),
-        getXmlLong(capability, "vCores"));
-
-    for (String expectedCustomResourceType : expectedCustomResourceTypes) {
-      assertTrue(
-          "Custom resource type " + expectedCustomResourceType
-              + " cannot be found!",
-          customResourceTypes.containsKey(expectedCustomResourceType));
-
-      Long resourceValue = customResourceTypes.get(expectedCustomResourceType);
-      assertNotNull("Resource value should not be null!", resourceValue);
-    }
-
-    Element executionTypeRequest = (Element) requestInfo
-        .getElementsByTagName("executionTypeRequest").item(0);
-    assertEquals("executionType does not match",
-        resourceRequest.getExecutionTypeRequest().getExecutionType().name(),
-        getXmlString(executionTypeRequest, "executionType"));
-    assertEquals("enforceExecutionType does not match",
-        resourceRequest.getExecutionTypeRequest().getEnforceExecutionType(),
-        getXmlBoolean(executionTypeRequest, "enforceExecutionType"));
-  }
-
-  /**
-   * Builder class for {@link ResourceRequestsXmlVerifications}.
-   */
-  public static final class Builder {
-    private List<String> expectedCustomResourceTypes = Lists.newArrayList();
-    private Map<String, Long> customResourceTypes;
-    private ResourceRequest resourceRequest;
-    private Element requestInfo;
-
-    Builder() {
-    }
-
-    public static Builder create() {
-      return new Builder();
-    }
-
-    Builder withExpectedCustomResourceTypes(
-        List<String> expectedCustomResourceTypes) {
-      this.expectedCustomResourceTypes = expectedCustomResourceTypes;
-      return this;
-    }
-
-    Builder withCustomResourceTypes(Map<String, Long> customResourceTypes) {
-      this.customResourceTypes = customResourceTypes;
-      return this;
-    }
-
-    Builder withRequest(ResourceRequest resourceRequest) {
-      this.resourceRequest = resourceRequest;
-      return this;
-    }
-
-    Builder withRequestInfo(Element requestInfo) {
-      this.requestInfo = requestInfo;
-      return this;
-    }
-
-    public ResourceRequestsXmlVerifications build() {
-      return new ResourceRequestsXmlVerifications(this);
-    }
-  }
-}
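
The XML flavour follows the same pattern, with the request handed over as a DOM Element instead of a JSONObject. A hedged sketch (names again illustrative):

    // "requestInfo" is assumed to be the DOM Element of one resource request parsed
    // from the RM XML response; "request" is the matching ResourceRequest.
    ResourceRequestsXmlVerifications.verifyWithCustomResourceTypes(
        requestInfo, request, Lists.newArrayList("customResource-1"));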

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
deleted file mode 100644
index 29260aa..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
-
-import com.sun.jersey.api.client.WebResource;
-import org.apache.hadoop.http.JettyUtils;
-import org.codehaus.jettison.json.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Document;
-import org.w3c.dom.Node;
-import org.xml.sax.InputSource;
-
-import javax.ws.rs.core.MediaType;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.transform.*;
-import javax.xml.transform.dom.DOMSource;
-import javax.xml.transform.stream.StreamResult;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.util.function.Consumer;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * This class hides the implementation details of how to verify the structure of
- * XML responses. Tests should only provide the path of the
- * {@link WebResource}, the response from the resource and
- * the verifier Consumer to
- * {@link XmlCustomResourceTypeTestCase#verify(Consumer)}. An instance of
- * {@link JSONObject} will be passed to that consumer to be able to
- * verify the response.
- */
-public class XmlCustomResourceTypeTestCase {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(XmlCustomResourceTypeTestCase.class);
-
-  private WebResource path;
-  private BufferedClientResponse response;
-  private Document parsedResponse;
-
-  public XmlCustomResourceTypeTestCase(WebResource path,
-                                       BufferedClientResponse response) {
-    this.path = path;
-    this.response = response;
-  }
-
-  public void verify(Consumer<Document> verifier) {
-    assertEquals(MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8,
-        response.getType().toString());
-
-    parsedResponse = parseXml(response);
-    logResponse(parsedResponse);
-    verifier.accept(parsedResponse);
-  }
-
-  private Document parseXml(BufferedClientResponse response) {
-    try {
-      String xml = response.getEntity(String.class);
-      DocumentBuilder db =
-          DocumentBuilderFactory.newInstance().newDocumentBuilder();
-      InputSource is = new InputSource();
-      is.setCharacterStream(new StringReader(xml));
-
-      return db.parse(is);
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  private void logResponse(Document doc) {
-    String responseStr = response.getEntity(String.class);
-    LOG.info("Raw response from service URL {}: {}", path.toString(),
-        responseStr);
-    LOG.info("Parsed response from service URL {}: {}", path.toString(),
-        toXml(doc));
-  }
-
-  public static String toXml(Node node) {
-    StringWriter writer;
-    try {
-      TransformerFactory tf = TransformerFactory.newInstance();
-      Transformer transformer = tf.newTransformer();
-      transformer.setOutputProperty(OutputKeys.INDENT, "yes");
-      transformer.setOutputProperty(
-          "{http://xml.apache.org/xslt}indent" + "-amount", "2");
-      writer = new StringWriter();
-      transformer.transform(new DOMSource(node), new StreamResult(writer));
-    } catch (TransformerException e) {
-      throw new RuntimeException(e);
-    }
-
-    return writer.getBuffer().toString();
-  }
-}
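
Despite the Javadoc's mention of JSONObject, the Consumer passed to verify receives the parsed org.w3c.dom.Document. A hedged usage sketch (path, response, and the asserted tag are illustrative):

    new XmlCustomResourceTypeTestCase(path, response).verify(document -> {
      NodeList apps = document.getElementsByTagName("app");
      assertTrue("at least one app element expected", apps.getLength() > 0);
    });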

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index b5bcbf5..269f5b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -86,11 +86,11 @@ The allocation file must be in XML format. The format contains five types of ele
 
 * **Queue elements**: which represent queues. Queue elements can take an optional attribute 'type', which when set to 'parent' makes it a parent queue. This is useful when we want to create a parent queue without configuring any leaf queues. Each queue element may contain the following properties:
 
-    * **minResources**: minimum resources the queue is entitled to, in the form of "X mb, Y vcores" or "vcores=X, memory-mb=Y". The latter form is required when specifying resources other than memory and CPU. For the single-resource fairness policy, the vcores value is ignored. If a queue's minimum share is not satisfied, it will be offered available resources before any other queue under the same parent. Under the single-resource fairness policy, a queue is considered unsatisfied if its memory usage is below its minimum memory share. Under dominant resource fairness, a queue is considered unsatisfied if its usage for its dominant resource with respect to the cluster capacity is below its minimum share for that resource. If multiple queues are unsatisfied in this situation, resources go to the queue with the smallest ratio between relevant resource usage and its minimum. Note that it is possible for a queue that is below its minimum to not immediately get up to its minimum when an application is submitted to the queue, because already-running jobs may be using those resources.
+    * **minResources**: minimum resources the queue is entitled to, in the form "X mb, Y vcores". For the single-resource fairness policy, the vcores value is ignored. If a queue's minimum share is not satisfied, it will be offered available resources before any other queue under the same parent. Under the single-resource fairness policy, a queue is considered unsatisfied if its memory usage is below its minimum memory share. Under dominant resource fairness, a queue is considered unsatisfied if its usage for its dominant resource with respect to the cluster capacity is below its minimum share for that resource. If multiple queues are unsatisfied in this situation, resources go to the queue with the smallest ratio between relevant resource usage and minimum. Note that it is possible that a queue that is below its minimum may not immediately get up to its minimum when it submits an application, because already-running jobs may be using those resources.
 
-    * **maxResources**: maximum resources a queue will allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying resources other than memory and CPU. In the last form, X and Y can either be a percentage or an integer resource value without units. In the latter case the units will be inferred from the default units configured for that resource. A queue will not be assigned a container that would put its aggregate usage over this limit.
+    * **maxResources**: maximum resources a queue is allocated, expressed either in absolute values (X mb, Y vcores) or as a percentage of the cluster resources (X% memory, Y% cpu). A queue will not be assigned a container that would put its aggregate usage over this limit.
 
-    * **maxChildResources**: maximum resources an ad hoc child queue will allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying resources other than memory and CPU. In the last form, X and Y can either be a percentage or an integer resource value without units. In the latter case the units will be inferred from the default units configured for that resource. An ad hoc child queue will not be assigned a container that would put its aggregate usage over this limit.
+    * **maxChildResources**: maximum resources an ad hoc child queue is allocated, expressed either in absolute values (X mb, Y vcores) or as a percentage of the cluster resources (X% memory, Y% cpu). An ad hoc child queue will not be assigned a container that would put its aggregate usage over this limit.
 
     * **maxRunningApps**: limit the number of apps from the queue to run at once
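
For reference, the wording on the + side of the hunk above documents minResources and maxResources in the plain "X mb, Y vcores" form. A minimal allocation-file fragment in that form might look like the following (queue name and values are illustrative):

    <allocations>
      <queue name="analytics">
        <minResources>10000 mb, 10 vcores</minResources>
        <maxResources>60000 mb, 30 vcores</maxResources>
        <maxRunningApps>50</maxRunningApps>
      </queue>
    </allocations>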
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[13/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js
new file mode 100644
index 0000000..ca03554
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+(function () {
+    "use strict";
+
+    var isIgnoredJmxKeys = function (key) {
+        return key == 'name' || key == 'modelerType' || key.match(/tag.*/);
+    };
+
+    angular.module('ozoneManager', ['ozone', 'nvd3']);
+    angular.module('ozoneManager').config(function ($routeProvider) {
+        $routeProvider
+            .when("/metrics/ozoneManager", {
+                template: "<om-metrics></om-metrics>"
+            });
+    });
+    angular.module('ozoneManager').component('omMetrics', {
+        templateUrl: 'om-metrics.html',
+        controller: function ($http) {
+            var ctrl = this;
+
+            ctrl.graphOptions = {
+                chart: {
+                    type: 'pieChart',
+                    height: 500,
+                    x: function (d) {
+                        return d.key;
+                    },
+                    y: function (d) {
+                        return d.value;
+                    },
+                    showLabels: true,
+                    labelType: 'value',
+                    duration: 500,
+                    labelThreshold: 0.01,
+                    valueFormat: function(d) {
+                        return d3.format('d')(d);
+                    },
+                    legend: {
+                        margin: {
+                            top: 5,
+                            right: 35,
+                            bottom: 5,
+                            left: 0
+                        }
+                    }
+                }
+            };
+
+
+            $http.get("jmx?qry=Hadoop:service=OzoneManager,name=OMMetrics")
+                .then(function (result) {
+
+                    var groupedMetrics = {others: [], nums: {}};
+                    var metrics = result.data.beans[0]
+                    for (var key in metrics) {
+                        var numericalStatistic = key.match(/Num([A-Z][a-z]+)(.+?)(Fails)?$/);
+                        if (numericalStatistic) {
+                            var type = numericalStatistic[1];
+                            var name = numericalStatistic[2];
+                            var failed = numericalStatistic[3];
+                            groupedMetrics.nums[type] = groupedMetrics.nums[type] || {
+                                    failures: [],
+                                    all: []
+                                };
+                            if (failed) {
+                                groupedMetrics.nums[type].failures.push({
+                                    key: name,
+                                    value: metrics[key]
+                                })
+                            } else {
+                                if (name == "Ops") {
+                                    groupedMetrics.nums[type].ops = metrics[key]
+                                } else {
+                                    groupedMetrics.nums[type].all.push({
+                                        key: name,
+                                        value: metrics[key]
+                                    })
+                                }
+                            }
+                        } else if (isIgnoredJmxKeys(key)) {
+                            //ignore
+                        } else {
+                            groupedMetrics.others.push({
+                                'key': key,
+                                'value': metrics[key]
+                            });
+                        }
+                    }
+                    ctrl.metrics = groupedMetrics;
+                })
+        }
+    });
+
+})();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java
deleted file mode 100644
index 0b43bf9..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java
+++ /dev/null
@@ -1,395 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.ksm.exceptions
-    .KSMException.ResultCodes;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.runner.RunWith;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.runners.MockitoJUnitRunner;
-import org.mockito.stubbing.Answer;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.LinkedList;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import static org.mockito.Mockito.any;
-
-/**
- * Tests BucketManagerImpl, mocks KSMMetadataManager for testing.
- */
-@RunWith(MockitoJUnitRunner.class)
-public class TestBucketManagerImpl {
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  private KSMMetadataManager getMetadataManagerMock(String... volumesToCreate)
-      throws IOException {
-    KSMMetadataManager metadataManager = Mockito.mock(KSMMetadataManager.class);
-    Map<String, byte[]> metadataDB = new HashMap<>();
-    ReadWriteLock lock = new ReentrantReadWriteLock();
-
-    Mockito.when(metadataManager.writeLock()).thenReturn(lock.writeLock());
-    Mockito.when(metadataManager.readLock()).thenReturn(lock.readLock());
-    Mockito.when(metadataManager.getVolumeKey(any(String.class))).thenAnswer(
-        (InvocationOnMock invocation) ->
-            DFSUtil.string2Bytes(
-                OzoneConsts.KSM_VOLUME_PREFIX + invocation.getArguments()[0]));
-    Mockito.when(metadataManager
-        .getBucketKey(any(String.class), any(String.class))).thenAnswer(
-            (InvocationOnMock invocation) ->
-                DFSUtil.string2Bytes(
-                    OzoneConsts.KSM_VOLUME_PREFIX
-                        + invocation.getArguments()[0]
-                        + OzoneConsts.KSM_BUCKET_PREFIX
-                        + invocation.getArguments()[1]));
-
-    Mockito.doAnswer(
-        new Answer<Boolean>() {
-          @Override
-          public Boolean answer(InvocationOnMock invocation)
-              throws Throwable {
-            String keyRootName =  OzoneConsts.KSM_KEY_PREFIX
-                + invocation.getArguments()[0]
-                + OzoneConsts.KSM_KEY_PREFIX
-                + invocation.getArguments()[1]
-                + OzoneConsts.KSM_KEY_PREFIX;
-            Iterator<String> keyIterator = metadataDB.keySet().iterator();
-            while(keyIterator.hasNext()) {
-              if(keyIterator.next().startsWith(keyRootName)) {
-                return false;
-              }
-            }
-            return true;
-          }
-        }).when(metadataManager).isBucketEmpty(any(String.class),
-        any(String.class));
-
-    Mockito.doAnswer(
-        new Answer<Void>() {
-          @Override
-          public Void answer(InvocationOnMock invocation) throws Throwable {
-            metadataDB.put(DFSUtil.bytes2String(
-                (byte[])invocation.getArguments()[0]),
-                (byte[])invocation.getArguments()[1]);
-            return null;
-          }
-        }).when(metadataManager).put(any(byte[].class), any(byte[].class));
-
-    Mockito.when(metadataManager.get(any(byte[].class))).thenAnswer(
-        (InvocationOnMock invocation) ->
-            metadataDB.get(DFSUtil.bytes2String(
-                (byte[])invocation.getArguments()[0]))
-    );
-    Mockito.doAnswer(
-        new Answer<Void>() {
-          @Override
-          public Void answer(InvocationOnMock invocation) throws Throwable {
-            metadataDB.remove(DFSUtil.bytes2String(
-                (byte[])invocation.getArguments()[0]));
-            return null;
-          }
-        }).when(metadataManager).delete(any(byte[].class));
-
-    for(String volumeName : volumesToCreate) {
-      byte[] dummyVolumeInfo = DFSUtil.string2Bytes(volumeName);
-      metadataDB.put(OzoneConsts.KSM_VOLUME_PREFIX + volumeName,
-                     dummyVolumeInfo);
-    }
-    return metadataManager;
-  }
-
-  @Test
-  public void testCreateBucketWithoutVolume() throws IOException {
-    thrown.expectMessage("Volume doesn't exist");
-    KSMMetadataManager metaMgr = getMetadataManagerMock();
-    try {
-      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-      KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-          .setVolumeName("sampleVol")
-          .setBucketName("bucketOne")
-          .build();
-      bucketManager.createBucket(bucketInfo);
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_VOLUME_NOT_FOUND,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-
-  @Test
-  public void testCreateBucket() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    Assert.assertNotNull(bucketManager.getBucketInfo("sampleVol", "bucketOne"));
-  }
-
-  @Test
-  public void testCreateAlreadyExistingBucket() throws IOException {
-    thrown.expectMessage("Bucket already exist");
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    try {
-      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-      KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-          .setVolumeName("sampleVol")
-          .setBucketName("bucketOne")
-          .build();
-      bucketManager.createBucket(bucketInfo);
-      bucketManager.createBucket(bucketInfo);
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_BUCKET_ALREADY_EXISTS,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-
-  @Test
-  public void testGetBucketInfoForInvalidBucket() throws IOException {
-    thrown.expectMessage("Bucket not found");
-    try {
-      KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-      bucketManager.getBucketInfo("sampleVol", "bucketOne");
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_FOUND,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-
-  @Test
-  public void testGetBucketInfo() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setStorageType(StorageType.DISK)
-        .setIsVersionEnabled(false)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals("sampleVol", result.getVolumeName());
-    Assert.assertEquals("bucketOne", result.getBucketName());
-    Assert.assertEquals(StorageType.DISK,
-        result.getStorageType());
-    Assert.assertEquals(false, result.getIsVersionEnabled());
-  }
-
-  @Test
-  public void testSetBucketPropertyAddACL() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    List<OzoneAcl> acls = new LinkedList<>();
-    OzoneAcl ozoneAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-        "root", OzoneAcl.OzoneACLRights.READ);
-    acls.add(ozoneAcl);
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setAcls(acls)
-        .setStorageType(StorageType.DISK)
-        .setIsVersionEnabled(false)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals("sampleVol", result.getVolumeName());
-    Assert.assertEquals("bucketOne", result.getBucketName());
-    Assert.assertEquals(1, result.getAcls().size());
-    List<OzoneAcl> addAcls = new LinkedList<>();
-    OzoneAcl newAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-        "ozone", OzoneAcl.OzoneACLRights.READ);
-    addAcls.add(newAcl);
-    KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setAddAcls(addAcls)
-        .build();
-    bucketManager.setBucketProperty(bucketArgs);
-    KsmBucketInfo updatedResult = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(2, updatedResult.getAcls().size());
-    Assert.assertTrue(updatedResult.getAcls().contains(newAcl));
-  }
-
-  @Test
-  public void testSetBucketPropertyRemoveACL() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    List<OzoneAcl> acls = new LinkedList<>();
-    OzoneAcl aclOne = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-        "root", OzoneAcl.OzoneACLRights.READ);
-    OzoneAcl aclTwo = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-        "ozone", OzoneAcl.OzoneACLRights.READ);
-    acls.add(aclOne);
-    acls.add(aclTwo);
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setAcls(acls)
-        .setStorageType(StorageType.DISK)
-        .setIsVersionEnabled(false)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(2, result.getAcls().size());
-    List<OzoneAcl> removeAcls = new LinkedList<>();
-    removeAcls.add(aclTwo);
-    KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setRemoveAcls(removeAcls)
-        .build();
-    bucketManager.setBucketProperty(bucketArgs);
-    KsmBucketInfo updatedResult = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(1, updatedResult.getAcls().size());
-    Assert.assertFalse(updatedResult.getAcls().contains(aclTwo));
-  }
-
-  @Test
-  public void testSetBucketPropertyChangeStorageType() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setStorageType(StorageType.DISK)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(StorageType.DISK,
-        result.getStorageType());
-    KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setStorageType(StorageType.SSD)
-        .build();
-    bucketManager.setBucketProperty(bucketArgs);
-    KsmBucketInfo updatedResult = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(StorageType.SSD,
-        updatedResult.getStorageType());
-  }
-
-  @Test
-  public void testSetBucketPropertyChangeVersioning() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setIsVersionEnabled(false)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertFalse(result.getIsVersionEnabled());
-    KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setIsVersionEnabled(true)
-        .build();
-    bucketManager.setBucketProperty(bucketArgs);
-    KsmBucketInfo updatedResult = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertTrue(updatedResult.getIsVersionEnabled());
-  }
-
-  @Test
-  public void testDeleteBucket() throws IOException {
-    thrown.expectMessage("Bucket not found");
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    for(int i = 0; i < 5; i++) {
-      KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-          .setVolumeName("sampleVol")
-          .setBucketName("bucket_" + i)
-          .build();
-      bucketManager.createBucket(bucketInfo);
-    }
-    for(int i = 0; i < 5; i++) {
-      Assert.assertEquals("bucket_" + i,
-          bucketManager.getBucketInfo(
-              "sampleVol", "bucket_" + i).getBucketName());
-    }
-    try {
-      bucketManager.deleteBucket("sampleVol", "bucket_1");
-      Assert.assertNotNull(bucketManager.getBucketInfo(
-          "sampleVol", "bucket_2"));
-    } catch(IOException ex) {
-      Assert.fail(ex.getMessage());
-    }
-    try {
-      bucketManager.getBucketInfo("sampleVol", "bucket_1");
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_FOUND,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-
-  @Test
-  public void testDeleteNonEmptyBucket() throws IOException {
-    thrown.expectMessage("Bucket is not empty");
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    //Create keys in bucket
-    metaMgr.put(DFSUtil.string2Bytes("/sampleVol/bucketOne/key_one"),
-        DFSUtil.string2Bytes("value_one"));
-    metaMgr.put(DFSUtil.string2Bytes("/sampleVol/bucketOne/key_two"),
-        DFSUtil.string2Bytes("value_two"));
-    try {
-      bucketManager.deleteBucket("sampleVol", "bucketOne");
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_EMPTY,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java
deleted file mode 100644
index e6158bd..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
-import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
-import org.apache.hadoop.hdds.scm.storage.ChunkInputStream;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.OutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * This class tests ChunkGroupInputStream and ChunkGroupOutStream.
- */
-public class TestChunkStreams {
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * This test uses ByteArrayOutputStream as the underlying stream to test
-   * the correctness of ChunkGroupOutputStream.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testWriteGroupOutputStream() throws Exception {
-    try (ChunkGroupOutputStream groupOutputStream =
-             new ChunkGroupOutputStream()) {
-      ArrayList<OutputStream> outputStreams = new ArrayList<>();
-
-      // 5 byte streams, each 100 bytes. write 500 bytes means writing to each
-      // of them with 100 bytes.
-      for (int i = 0; i < 5; i++) {
-        ByteArrayOutputStream out = new ByteArrayOutputStream(100);
-        outputStreams.add(out);
-        groupOutputStream.addStream(out, 100);
-      }
-      assertEquals(0, groupOutputStream.getByteOffset());
-
-      String dataString = RandomStringUtils.randomAscii(500);
-      byte[] data = dataString.getBytes();
-      groupOutputStream.write(data, 0, data.length);
-      assertEquals(500, groupOutputStream.getByteOffset());
-
-      String res = "";
-      int offset = 0;
-      for (OutputStream stream : outputStreams) {
-        String subString = stream.toString();
-        res += subString;
-        assertEquals(dataString.substring(offset, offset + 100), subString);
-        offset += 100;
-      }
-      assertEquals(dataString, res);
-    }
-  }
-
-  @Test
-  public void testErrorWriteGroupOutputStream() throws Exception {
-    try (ChunkGroupOutputStream groupOutputStream =
-             new ChunkGroupOutputStream()) {
-      ArrayList<OutputStream> outputStreams = new ArrayList<>();
-
-      // 5 byte streams, each 100 bytes. write 500 bytes means writing to each
-      // of them with 100 bytes. all 5 streams makes up a ChunkGroupOutputStream
-      // with a total of 500 bytes in size
-      for (int i = 0; i < 5; i++) {
-        ByteArrayOutputStream out = new ByteArrayOutputStream(100);
-        outputStreams.add(out);
-        groupOutputStream.addStream(out, 100);
-      }
-      assertEquals(0, groupOutputStream.getByteOffset());
-
-      // first writes of 100 bytes should succeed
-      groupOutputStream.write(RandomStringUtils.randomAscii(100).getBytes());
-      assertEquals(100, groupOutputStream.getByteOffset());
-
-      // second writes of 500 bytes should fail, as there should be only 400
-      // bytes space left
-      // TODO : if we decide to take the 400 bytes instead in the future,
-      // other add more informative error code rather than exception, need to
-      // change this part.
-      exception.expect(Exception.class);
-      groupOutputStream.write(RandomStringUtils.randomAscii(500).getBytes());
-      assertEquals(100, groupOutputStream.getByteOffset());
-    }
-  }
-
-  @Test
-  public void testReadGroupInputStream() throws Exception {
-    try (ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream()) {
-      ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();
-
-      String dataString = RandomStringUtils.randomAscii(500);
-      byte[] buf = dataString.getBytes();
-      int offset = 0;
-      for (int i = 0; i < 5; i++) {
-        int tempOffset = offset;
-        ChunkInputStream in =
-            new ChunkInputStream(null, null, null, new ArrayList<>(), null) {
-              private ByteArrayInputStream in =
-                  new ByteArrayInputStream(buf, tempOffset, 100);
-
-              @Override
-              public void seek(long pos) throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public long getPos() throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public boolean seekToNewSource(long targetPos)
-                  throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public int read() throws IOException {
-                return in.read();
-              }
-
-              @Override
-              public int read(byte[] b, int off, int len) throws IOException {
-                return in.read(b, off, len);
-              }
-            };
-        inputStreams.add(in);
-        offset += 100;
-        groupInputStream.addStream(in, 100);
-      }
-
-      byte[] resBuf = new byte[500];
-      int len = groupInputStream.read(resBuf, 0, 500);
-
-      assertEquals(500, len);
-      assertEquals(dataString, new String(resBuf));
-    }
-  }
-
-  @Test
-  public void testErrorReadGroupInputStream() throws Exception {
-    try (ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream()) {
-      ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();
-
-      String dataString = RandomStringUtils.randomAscii(500);
-      byte[] buf = dataString.getBytes();
-      int offset = 0;
-      for (int i = 0; i < 5; i++) {
-        int tempOffset = offset;
-        ChunkInputStream in =
-            new ChunkInputStream(null, null, null, new ArrayList<>(), null) {
-              private ByteArrayInputStream in =
-                  new ByteArrayInputStream(buf, tempOffset, 100);
-
-              @Override
-              public void seek(long pos) throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public long getPos() throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public boolean seekToNewSource(long targetPos)
-                  throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public int read() throws IOException {
-                return in.read();
-              }
-
-              @Override
-              public int read(byte[] b, int off, int len) throws IOException {
-                return in.read(b, off, len);
-              }
-            };
-        inputStreams.add(in);
-        offset += 100;
-        groupInputStream.addStream(in, 100);
-      }
-
-      byte[] resBuf = new byte[600];
-      // read 300 bytes first
-      int len = groupInputStream.read(resBuf, 0, 340);
-      assertEquals(3, groupInputStream.getCurrentStreamIndex());
-      assertEquals(60, groupInputStream.getRemainingOfIndex(3));
-      assertEquals(340, len);
-      assertEquals(dataString.substring(0, 340),
-          new String(resBuf).substring(0, 340));
-
-      // read following 300 bytes, but only 200 left
-      len = groupInputStream.read(resBuf, 340, 260);
-      assertEquals(5, groupInputStream.getCurrentStreamIndex());
-      assertEquals(0, groupInputStream.getRemainingOfIndex(4));
-      assertEquals(160, len);
-      assertEquals(dataString, new String(resBuf).substring(0, 500));
-
-      // further read should get EOF
-      len = groupInputStream.read(resBuf, 0, 1);
-      // reached EOF, further read should get -1
-      assertEquals(-1, len);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java
deleted file mode 100644
index b263df5..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.web.URLConnectionFactory;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpConfig.Policy;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-
-import java.io.File;
-import java.net.InetSocketAddress;
-import java.net.URL;
-import java.net.URLConnection;
-import java.util.Arrays;
-import java.util.Collection;
-
-/**
- * Test http server os KSM with various HTTP option.
- */
-@RunWith(value = Parameterized.class)
-public class TestKeySpaceManagerHttpServer {
-  private static final String BASEDIR = GenericTestUtils
-      .getTempPath(TestKeySpaceManagerHttpServer.class.getSimpleName());
-  private static String keystoresDir;
-  private static String sslConfDir;
-  private static Configuration conf;
-  private static URLConnectionFactory connectionFactory;
-
-  @Parameters public static Collection<Object[]> policy() {
-    Object[][] params = new Object[][] {
-        {HttpConfig.Policy.HTTP_ONLY},
-        {HttpConfig.Policy.HTTPS_ONLY},
-        {HttpConfig.Policy.HTTP_AND_HTTPS} };
-    return Arrays.asList(params);
-  }
-
-  private final HttpConfig.Policy policy;
-
-  public TestKeySpaceManagerHttpServer(Policy policy) {
-    super();
-    this.policy = policy;
-  }
-
-  @BeforeClass public static void setUp() throws Exception {
-    File base = new File(BASEDIR);
-    FileUtil.fullyDelete(base);
-    base.mkdirs();
-    conf = new Configuration();
-    keystoresDir = new File(BASEDIR).getAbsolutePath();
-    sslConfDir = KeyStoreTestUtil.getClasspathDir(
-        TestKeySpaceManagerHttpServer.class);
-    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
-    connectionFactory =
-        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
-    conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
-        KeyStoreTestUtil.getClientSSLConfigFileName());
-    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-        KeyStoreTestUtil.getServerSSLConfigFileName());
-  }
-
-  @AfterClass public static void tearDown() throws Exception {
-    FileUtil.fullyDelete(new File(BASEDIR));
-    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
-  }
-
-  @Test public void testHttpPolicy() throws Exception {
-    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
-    conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0");
-
-    InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
-    KeySpaceManagerHttpServer server = null;
-    try {
-      server = new KeySpaceManagerHttpServer(conf, null);
-      server.start();
-
-      Assert.assertTrue(implies(policy.isHttpEnabled(),
-          canAccess("http", server.getHttpAddress())));
-      Assert.assertTrue(
-          implies(!policy.isHttpEnabled(), server.getHttpAddress() == null));
-
-      Assert.assertTrue(implies(policy.isHttpsEnabled(),
-          canAccess("https", server.getHttpsAddress())));
-      Assert.assertTrue(
-          implies(!policy.isHttpsEnabled(), server.getHttpsAddress() == null));
-
-    } finally {
-      if (server != null) {
-        server.stop();
-      }
-    }
-  }
-
-  private static boolean canAccess(String scheme, InetSocketAddress addr) {
-    if (addr == null) {
-      return false;
-    }
-    try {
-      URL url =
-          new URL(scheme + "://" + NetUtils.getHostPortString(addr) + "/jmx");
-      URLConnection conn = connectionFactory.openConnection(url);
-      conn.connect();
-      conn.getContent();
-    } catch (Exception e) {
-      return false;
-    }
-    return true;
-  }
-
-  private static boolean implies(boolean a, boolean b) {
-    return !a || b;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java
deleted file mode 100644
index 089ff4b..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm;
-/**
- * KSM tests
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
new file mode 100644
index 0000000..1ecac7f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
@@ -0,0 +1,394 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.runner.RunWith;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.runners.MockitoJUnitRunner;
+import org.mockito.stubbing.Answer;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.LinkedList;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import static org.mockito.Mockito.any;
+
+/**
+ * Tests BucketManagerImpl, mocks OMMetadataManager for testing.
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class TestBucketManagerImpl {
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  private OMMetadataManager getMetadataManagerMock(String... volumesToCreate)
+      throws IOException {
+    OMMetadataManager metadataManager = Mockito.mock(OMMetadataManager.class);
+    Map<String, byte[]> metadataDB = new HashMap<>();
+    ReadWriteLock lock = new ReentrantReadWriteLock();
+
+    Mockito.when(metadataManager.writeLock()).thenReturn(lock.writeLock());
+    Mockito.when(metadataManager.readLock()).thenReturn(lock.readLock());
+    Mockito.when(metadataManager.getVolumeKey(any(String.class))).thenAnswer(
+        (InvocationOnMock invocation) ->
+            DFSUtil.string2Bytes(
+                OzoneConsts.OM_VOLUME_PREFIX + invocation.getArguments()[0]));
+    Mockito.when(metadataManager
+        .getBucketKey(any(String.class), any(String.class))).thenAnswer(
+            (InvocationOnMock invocation) ->
+                DFSUtil.string2Bytes(
+                    OzoneConsts.OM_VOLUME_PREFIX
+                        + invocation.getArguments()[0]
+                        + OzoneConsts.OM_BUCKET_PREFIX
+                        + invocation.getArguments()[1]));
+
+    Mockito.doAnswer(
+        new Answer<Boolean>() {
+          @Override
+          public Boolean answer(InvocationOnMock invocation)
+              throws Throwable {
+            String keyRootName =  OzoneConsts.OM_KEY_PREFIX
+                + invocation.getArguments()[0]
+                + OzoneConsts.OM_KEY_PREFIX
+                + invocation.getArguments()[1]
+                + OzoneConsts.OM_KEY_PREFIX;
+            Iterator<String> keyIterator = metadataDB.keySet().iterator();
+            while(keyIterator.hasNext()) {
+              if(keyIterator.next().startsWith(keyRootName)) {
+                return false;
+              }
+            }
+            return true;
+          }
+        }).when(metadataManager).isBucketEmpty(any(String.class),
+        any(String.class));
+
+    Mockito.doAnswer(
+        new Answer<Void>() {
+          @Override
+          public Void answer(InvocationOnMock invocation) throws Throwable {
+            metadataDB.put(DFSUtil.bytes2String(
+                (byte[])invocation.getArguments()[0]),
+                (byte[])invocation.getArguments()[1]);
+            return null;
+          }
+        }).when(metadataManager).put(any(byte[].class), any(byte[].class));
+
+    Mockito.when(metadataManager.get(any(byte[].class))).thenAnswer(
+        (InvocationOnMock invocation) ->
+            metadataDB.get(DFSUtil.bytes2String(
+                (byte[])invocation.getArguments()[0]))
+    );
+    Mockito.doAnswer(
+        new Answer<Void>() {
+          @Override
+          public Void answer(InvocationOnMock invocation) throws Throwable {
+            metadataDB.remove(DFSUtil.bytes2String(
+                (byte[])invocation.getArguments()[0]));
+            return null;
+          }
+        }).when(metadataManager).delete(any(byte[].class));
+
+    for(String volumeName : volumesToCreate) {
+      byte[] dummyVolumeInfo = DFSUtil.string2Bytes(volumeName);
+      metadataDB.put(OzoneConsts.OM_VOLUME_PREFIX + volumeName,
+                     dummyVolumeInfo);
+    }
+    return metadataManager;
+  }
+
+  @Test
+  public void testCreateBucketWithoutVolume() throws IOException {
+    thrown.expectMessage("Volume doesn't exist");
+    OMMetadataManager metaMgr = getMetadataManagerMock();
+    try {
+      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+      OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+          .setVolumeName("sampleVol")
+          .setBucketName("bucketOne")
+          .build();
+      bucketManager.createBucket(bucketInfo);
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_VOLUME_NOT_FOUND,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+
+  @Test
+  public void testCreateBucket() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    Assert.assertNotNull(bucketManager.getBucketInfo("sampleVol", "bucketOne"));
+  }
+
+  @Test
+  public void testCreateAlreadyExistingBucket() throws IOException {
+    thrown.expectMessage("Bucket already exist");
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    try {
+      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+      OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+          .setVolumeName("sampleVol")
+          .setBucketName("bucketOne")
+          .build();
+      bucketManager.createBucket(bucketInfo);
+      bucketManager.createBucket(bucketInfo);
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_BUCKET_ALREADY_EXISTS,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+
+  @Test
+  public void testGetBucketInfoForInvalidBucket() throws IOException {
+    thrown.expectMessage("Bucket not found");
+    try {
+      OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+      bucketManager.getBucketInfo("sampleVol", "bucketOne");
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_FOUND,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+
+  @Test
+  public void testGetBucketInfo() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setStorageType(StorageType.DISK)
+        .setIsVersionEnabled(false)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals("sampleVol", result.getVolumeName());
+    Assert.assertEquals("bucketOne", result.getBucketName());
+    Assert.assertEquals(StorageType.DISK,
+        result.getStorageType());
+    Assert.assertEquals(false, result.getIsVersionEnabled());
+  }
+
+  @Test
+  public void testSetBucketPropertyAddACL() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    List<OzoneAcl> acls = new LinkedList<>();
+    OzoneAcl ozoneAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
+        "root", OzoneAcl.OzoneACLRights.READ);
+    acls.add(ozoneAcl);
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setAcls(acls)
+        .setStorageType(StorageType.DISK)
+        .setIsVersionEnabled(false)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals("sampleVol", result.getVolumeName());
+    Assert.assertEquals("bucketOne", result.getBucketName());
+    Assert.assertEquals(1, result.getAcls().size());
+    List<OzoneAcl> addAcls = new LinkedList<>();
+    OzoneAcl newAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
+        "ozone", OzoneAcl.OzoneACLRights.READ);
+    addAcls.add(newAcl);
+    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setAddAcls(addAcls)
+        .build();
+    bucketManager.setBucketProperty(bucketArgs);
+    OmBucketInfo updatedResult = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(2, updatedResult.getAcls().size());
+    Assert.assertTrue(updatedResult.getAcls().contains(newAcl));
+  }
+
+  @Test
+  public void testSetBucketPropertyRemoveACL() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    List<OzoneAcl> acls = new LinkedList<>();
+    OzoneAcl aclOne = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
+        "root", OzoneAcl.OzoneACLRights.READ);
+    OzoneAcl aclTwo = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
+        "ozone", OzoneAcl.OzoneACLRights.READ);
+    acls.add(aclOne);
+    acls.add(aclTwo);
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setAcls(acls)
+        .setStorageType(StorageType.DISK)
+        .setIsVersionEnabled(false)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(2, result.getAcls().size());
+    List<OzoneAcl> removeAcls = new LinkedList<>();
+    removeAcls.add(aclTwo);
+    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setRemoveAcls(removeAcls)
+        .build();
+    bucketManager.setBucketProperty(bucketArgs);
+    OmBucketInfo updatedResult = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(1, updatedResult.getAcls().size());
+    Assert.assertFalse(updatedResult.getAcls().contains(aclTwo));
+  }
+
+  @Test
+  public void testSetBucketPropertyChangeStorageType() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setStorageType(StorageType.DISK)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(StorageType.DISK,
+        result.getStorageType());
+    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setStorageType(StorageType.SSD)
+        .build();
+    bucketManager.setBucketProperty(bucketArgs);
+    OmBucketInfo updatedResult = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(StorageType.SSD,
+        updatedResult.getStorageType());
+  }
+
+  @Test
+  public void testSetBucketPropertyChangeVersioning() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setIsVersionEnabled(false)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertFalse(result.getIsVersionEnabled());
+    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setIsVersionEnabled(true)
+        .build();
+    bucketManager.setBucketProperty(bucketArgs);
+    OmBucketInfo updatedResult = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertTrue(updatedResult.getIsVersionEnabled());
+  }
+
+  @Test
+  public void testDeleteBucket() throws IOException {
+    thrown.expectMessage("Bucket not found");
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    for(int i = 0; i < 5; i++) {
+      OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+          .setVolumeName("sampleVol")
+          .setBucketName("bucket_" + i)
+          .build();
+      bucketManager.createBucket(bucketInfo);
+    }
+    for(int i = 0; i < 5; i++) {
+      Assert.assertEquals("bucket_" + i,
+          bucketManager.getBucketInfo(
+              "sampleVol", "bucket_" + i).getBucketName());
+    }
+    try {
+      bucketManager.deleteBucket("sampleVol", "bucket_1");
+      Assert.assertNotNull(bucketManager.getBucketInfo(
+          "sampleVol", "bucket_2"));
+    } catch(IOException ex) {
+      Assert.fail(ex.getMessage());
+    }
+    try {
+      bucketManager.getBucketInfo("sampleVol", "bucket_1");
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_FOUND,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+
+  @Test
+  public void testDeleteNonEmptyBucket() throws IOException {
+    thrown.expectMessage("Bucket is not empty");
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    //Create keys in bucket
+    metaMgr.put(DFSUtil.string2Bytes("/sampleVol/bucketOne/key_one"),
+        DFSUtil.string2Bytes("value_one"));
+    metaMgr.put(DFSUtil.string2Bytes("/sampleVol/bucketOne/key_two"),
+        DFSUtil.string2Bytes("value_two"));
+    try {
+      bucketManager.deleteBucket("sampleVol", "bucketOne");
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_EMPTY,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+}
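
Editor's note: the new TestBucketManagerImpl above replaces a real OMMetadataManager with
a Mockito mock whose put/get/delete answers are backed by an in-memory HashMap. Below is
a minimal, self-contained sketch of that mocking pattern; it is not part of the commit,
and the KeyValueStore interface is a hypothetical stand-in for the handful of
OMMetadataManager methods the test stubs.

import java.util.HashMap;
import java.util.Map;

import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class InMemoryStoreMockSketch {

  /** Hypothetical stand-in for the subset of OMMetadataManager mocked above. */
  public interface KeyValueStore {
    void put(String key, String value);
    String get(String key);
    void delete(String key);
  }

  public static KeyValueStore newMockStore() {
    Map<String, String> backing = new HashMap<>();
    KeyValueStore store = Mockito.mock(KeyValueStore.class);

    // put(key, value) writes through to the backing map.
    Mockito.doAnswer(
        new Answer<Void>() {
          @Override
          public Void answer(InvocationOnMock invocation) {
            backing.put((String) invocation.getArguments()[0],
                (String) invocation.getArguments()[1]);
            return null;
          }
        }).when(store).put(Mockito.any(String.class), Mockito.any(String.class));

    // get(key) reads from the backing map; a miss returns null.
    Mockito.when(store.get(Mockito.any(String.class))).thenAnswer(
        (InvocationOnMock invocation) ->
            backing.get((String) invocation.getArguments()[0]));

    // delete(key) removes the entry.
    Mockito.doAnswer(
        new Answer<Void>() {
          @Override
          public Void answer(InvocationOnMock invocation) {
            backing.remove((String) invocation.getArguments()[0]);
            return null;
          }
        }).when(store).delete(Mockito.any(String.class));

    return store;
  }

  public static void main(String[] args) {
    KeyValueStore store = newMockStore();
    store.put("/sampleVol/bucketOne", "bucketInfo");
    System.out.println(store.get("/sampleVol/bucketOne"));  // bucketInfo
    store.delete("/sampleVol/bucketOne");
    System.out.println(store.get("/sampleVol/bucketOne"));  // null
  }
}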

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
new file mode 100644
index 0000000..7ce916a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
@@ -0,0 +1,234 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
+import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
+import org.apache.hadoop.hdds.scm.storage.ChunkInputStream;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.OutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This class tests ChunkGroupInputStream and ChunkGroupOutputStream.
+ */
+public class TestChunkStreams {
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  /**
+   * This test uses ByteArrayOutputStream as the underlying stream to test
+   * the correctness of ChunkGroupOutputStream.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testWriteGroupOutputStream() throws Exception {
+    try (ChunkGroupOutputStream groupOutputStream =
+             new ChunkGroupOutputStream()) {
+      ArrayList<OutputStream> outputStreams = new ArrayList<>();
+
+      // 5 byte-array streams of 100 bytes each; writing 500 bytes means
+      // writing 100 bytes to each of them.
+      for (int i = 0; i < 5; i++) {
+        ByteArrayOutputStream out = new ByteArrayOutputStream(100);
+        outputStreams.add(out);
+        groupOutputStream.addStream(out, 100);
+      }
+      assertEquals(0, groupOutputStream.getByteOffset());
+
+      String dataString = RandomStringUtils.randomAscii(500);
+      byte[] data = dataString.getBytes();
+      groupOutputStream.write(data, 0, data.length);
+      assertEquals(500, groupOutputStream.getByteOffset());
+
+      String res = "";
+      int offset = 0;
+      for (OutputStream stream : outputStreams) {
+        String subString = stream.toString();
+        res += subString;
+        assertEquals(dataString.substring(offset, offset + 100), subString);
+        offset += 100;
+      }
+      assertEquals(dataString, res);
+    }
+  }
+
+  @Test
+  public void testErrorWriteGroupOutputStream() throws Exception {
+    try (ChunkGroupOutputStream groupOutputStream =
+             new ChunkGroupOutputStream()) {
+      ArrayList<OutputStream> outputStreams = new ArrayList<>();
+
+      // 5 byte-array streams of 100 bytes each; writing 500 bytes means
+      // writing 100 bytes to each of them. Together the 5 streams make up a
+      // ChunkGroupOutputStream with a total capacity of 500 bytes.
+      for (int i = 0; i < 5; i++) {
+        ByteArrayOutputStream out = new ByteArrayOutputStream(100);
+        outputStreams.add(out);
+        groupOutputStream.addStream(out, 100);
+      }
+      assertEquals(0, groupOutputStream.getByteOffset());
+
+      // the first write of 100 bytes should succeed
+      groupOutputStream.write(RandomStringUtils.randomAscii(100).getBytes());
+      assertEquals(100, groupOutputStream.getByteOffset());
+
+      // the second write of 500 bytes should fail, as only 400 bytes of
+      // space are left
+      // TODO : if we decide to accept the remaining 400 bytes instead in the
+      // future, or to add a more informative error code rather than an
+      // exception, this part needs to change.
+      exception.expect(Exception.class);
+      groupOutputStream.write(RandomStringUtils.randomAscii(500).getBytes());
+      assertEquals(100, groupOutputStream.getByteOffset());
+    }
+  }
+
+  @Test
+  public void testReadGroupInputStream() throws Exception {
+    try (ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream()) {
+      ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();
+
+      String dataString = RandomStringUtils.randomAscii(500);
+      byte[] buf = dataString.getBytes();
+      int offset = 0;
+      for (int i = 0; i < 5; i++) {
+        int tempOffset = offset;
+        ChunkInputStream in =
+            new ChunkInputStream(null, null, null, new ArrayList<>(), null) {
+              private ByteArrayInputStream in =
+                  new ByteArrayInputStream(buf, tempOffset, 100);
+
+              @Override
+              public void seek(long pos) throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public long getPos() throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public boolean seekToNewSource(long targetPos)
+                  throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public int read() throws IOException {
+                return in.read();
+              }
+
+              @Override
+              public int read(byte[] b, int off, int len) throws IOException {
+                return in.read(b, off, len);
+              }
+            };
+        inputStreams.add(in);
+        offset += 100;
+        groupInputStream.addStream(in, 100);
+      }
+
+      byte[] resBuf = new byte[500];
+      int len = groupInputStream.read(resBuf, 0, 500);
+
+      assertEquals(500, len);
+      assertEquals(dataString, new String(resBuf));
+    }
+  }
+
+  @Test
+  public void testErrorReadGroupInputStream() throws Exception {
+    try (ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream()) {
+      ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();
+
+      String dataString = RandomStringUtils.randomAscii(500);
+      byte[] buf = dataString.getBytes();
+      int offset = 0;
+      for (int i = 0; i < 5; i++) {
+        int tempOffset = offset;
+        ChunkInputStream in =
+            new ChunkInputStream(null, null, null, new ArrayList<>(), null) {
+              private ByteArrayInputStream in =
+                  new ByteArrayInputStream(buf, tempOffset, 100);
+
+              @Override
+              public void seek(long pos) throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public long getPos() throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public boolean seekToNewSource(long targetPos)
+                  throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public int read() throws IOException {
+                return in.read();
+              }
+
+              @Override
+              public int read(byte[] b, int off, int len) throws IOException {
+                return in.read(b, off, len);
+              }
+            };
+        inputStreams.add(in);
+        offset += 100;
+        groupInputStream.addStream(in, 100);
+      }
+
+      byte[] resBuf = new byte[600];
+      // read 340 bytes first
+      int len = groupInputStream.read(resBuf, 0, 340);
+      assertEquals(3, groupInputStream.getCurrentStreamIndex());
+      assertEquals(60, groupInputStream.getRemainingOfIndex(3));
+      assertEquals(340, len);
+      assertEquals(dataString.substring(0, 340),
+          new String(resBuf).substring(0, 340));
+
+      // request 260 more bytes, but only 160 are left
+      len = groupInputStream.read(resBuf, 340, 260);
+      assertEquals(5, groupInputStream.getCurrentStreamIndex());
+      assertEquals(0, groupInputStream.getRemainingOfIndex(4));
+      assertEquals(160, len);
+      assertEquals(dataString, new String(resBuf).substring(0, 500));
+
+      // further read should get EOF
+      len = groupInputStream.read(resBuf, 0, 1);
+      // reached EOF, further read should get -1
+      assertEquals(-1, len);
+    }
+  }
+}
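
Editor's note: the stream tests above exercise ChunkGroupOutputStream spreading a single
write across several fixed-capacity sub-streams while tracking the overall byte offset.
The sketch below is an illustrative, self-contained approximation of that behaviour, not
the real ChunkGroupOutputStream; it shows why the tests expect getByteOffset() to reach
500 after a 500-byte write and an error once the combined capacity is exhausted.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;

public class GroupOutputStreamSketch {

  /** One fixed-capacity slot in the group. */
  private static final class Entry {
    final OutputStream stream;
    final long limit;
    long written;
    Entry(OutputStream stream, long limit) {
      this.stream = stream;
      this.limit = limit;
    }
  }

  private final List<Entry> entries = new ArrayList<>();
  private long byteOffset;

  public void addStream(OutputStream out, long length) {
    entries.add(new Entry(out, length));
  }

  public long getByteOffset() {
    return byteOffset;
  }

  /** Spill the write across entries, filling each up to its limit. */
  public void write(byte[] b, int off, int len) throws IOException {
    int remaining = len;
    int cursor = off;
    for (Entry e : entries) {
      if (remaining == 0) {
        return;
      }
      int room = (int) Math.min(e.limit - e.written, remaining);
      if (room <= 0) {
        continue;
      }
      e.stream.write(b, cursor, room);
      e.written += room;
      byteOffset += room;
      cursor += room;
      remaining -= room;
    }
    if (remaining > 0) {
      throw new IOException("Not enough space left for " + remaining + " bytes");
    }
  }

  public static void main(String[] args) throws IOException {
    GroupOutputStreamSketch group = new GroupOutputStreamSketch();
    for (int i = 0; i < 5; i++) {
      group.addStream(new ByteArrayOutputStream(100), 100);
    }
    byte[] data = new byte[500];
    group.write(data, 0, data.length);
    System.out.println(group.getByteOffset()); // 500
  }
}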

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
new file mode 100644
index 0000000..3e11a13
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.http.HttpConfig.Policy;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import java.io.File;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.Arrays;
+import java.util.Collection;
+
+/**
+ * Tests the HTTP server of OM with various HTTP policy options.
+ */
+@RunWith(value = Parameterized.class)
+public class TestOzoneManagerHttpServer {
+  private static final String BASEDIR = GenericTestUtils
+      .getTempPath(TestOzoneManagerHttpServer.class.getSimpleName());
+  private static String keystoresDir;
+  private static String sslConfDir;
+  private static Configuration conf;
+  private static URLConnectionFactory connectionFactory;
+
+  @Parameters public static Collection<Object[]> policy() {
+    Object[][] params = new Object[][] {
+        {HttpConfig.Policy.HTTP_ONLY},
+        {HttpConfig.Policy.HTTPS_ONLY},
+        {HttpConfig.Policy.HTTP_AND_HTTPS} };
+    return Arrays.asList(params);
+  }
+
+  private final HttpConfig.Policy policy;
+
+  public TestOzoneManagerHttpServer(Policy policy) {
+    super();
+    this.policy = policy;
+  }
+
+  @BeforeClass public static void setUp() throws Exception {
+    File base = new File(BASEDIR);
+    FileUtil.fullyDelete(base);
+    base.mkdirs();
+    conf = new Configuration();
+    keystoresDir = new File(BASEDIR).getAbsolutePath();
+    sslConfDir = KeyStoreTestUtil.getClasspathDir(
+        TestOzoneManagerHttpServer.class);
+    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+    connectionFactory =
+        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
+    conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getClientSSLConfigFileName());
+    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getServerSSLConfigFileName());
+  }
+
+  @AfterClass public static void tearDown() throws Exception {
+    FileUtil.fullyDelete(new File(BASEDIR));
+    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
+  }
+
+  @Test public void testHttpPolicy() throws Exception {
+    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
+    conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0");
+
+    InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
+    OzoneManagerHttpServer server = null;
+    try {
+      server = new OzoneManagerHttpServer(conf, null);
+      server.start();
+
+      Assert.assertTrue(implies(policy.isHttpEnabled(),
+          canAccess("http", server.getHttpAddress())));
+      Assert.assertTrue(
+          implies(!policy.isHttpEnabled(), server.getHttpAddress() == null));
+
+      Assert.assertTrue(implies(policy.isHttpsEnabled(),
+          canAccess("https", server.getHttpsAddress())));
+      Assert.assertTrue(
+          implies(!policy.isHttpsEnabled(), server.getHttpsAddress() == null));
+
+    } finally {
+      if (server != null) {
+        server.stop();
+      }
+    }
+  }
+
+  private static boolean canAccess(String scheme, InetSocketAddress addr) {
+    if (addr == null) {
+      return false;
+    }
+    try {
+      URL url =
+          new URL(scheme + "://" + NetUtils.getHostPortString(addr) + "/jmx");
+      URLConnection conn = connectionFactory.openConnection(url);
+      conn.connect();
+      conn.getContent();
+    } catch (Exception e) {
+      return false;
+    }
+    return true;
+  }
+
+  private static boolean implies(boolean a, boolean b) {
+    return !a || b;
+  }
+}
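
Editor's note: testHttpPolicy above encodes the expectation "if the policy enables an
endpoint, that endpoint must be reachable" through the small implies helper. The
following is a self-contained truth-table sketch of that check, added here for
illustration only and assuming nothing beyond plain Java.

public class ImpliesSketch {

  /** Same helper as in the test: logical implication a -> b. */
  static boolean implies(boolean a, boolean b) {
    return !a || b;
  }

  public static void main(String[] args) {
    System.out.println(implies(true, true));   // true  -> enabled and reachable, passes
    System.out.println(implies(true, false));  // false -> enabled but unreachable, fails
    System.out.println(implies(false, true));  // true  -> reachable though not required
    System.out.println(implies(false, false)); // true  -> disabled and absent, passes
  }
}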

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java
new file mode 100644
index 0000000..12fcf7c
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+/**
+ * OM tests
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
index 8417e46..b63e182 100644
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.ozone.web.handlers.UserArgs;
 import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
 import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.junit.Assert;
 
@@ -109,7 +109,7 @@ class OzoneContract extends AbstractFSContract {
     String uri = String.format("%s://%s.%s/",
         OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
     getConf().set("fs.defaultFS", uri);
-    copyClusterConfigs(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY);
+    copyClusterConfigs(OMConfigKeys.OZONE_OM_ADDRESS_KEY);
     copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
     return FileSystem.get(getConf());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index 26776c5..3884edd 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -32,11 +32,11 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeList;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.util.Tool;
@@ -60,10 +60,10 @@ import java.util.HashSet;
 import java.util.Set;
 
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME;
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_USER_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_BUCKET_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_VOLUME_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_USER_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_BUCKET_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_VOLUME_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
 
 /**
@@ -120,7 +120,7 @@ public class SQLCLI  extends Configured implements Tool {
       "INSERT INTO openContainer (containerName, containerUsed) " +
           "VALUES (\"%s\", \"%s\")";
 
-  // for ksm.db
+  // for om.db
   private static final String CREATE_VOLUME_LIST =
       "CREATE TABLE volumeList (" +
           "userName TEXT NOT NULL," +
@@ -278,9 +278,9 @@ public class SQLCLI  extends Configured implements Tool {
     } else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) {
       LOG.info("Converting open container DB");
       convertOpenContainerDB(dbPath, outPath);
-    } else if (dbName.toString().equals(KSM_DB_NAME)) {
-      LOG.info("Converting ksm DB");
-      convertKSMDB(dbPath, outPath);
+    } else if (dbName.toString().equals(OM_DB_NAME)) {
+      LOG.info("Converting om DB");
+      convertOMDB(dbPath, outPath);
     } else {
       LOG.error("Unrecognized db name {}", dbName);
     }
@@ -301,7 +301,7 @@ public class SQLCLI  extends Configured implements Tool {
   }
 
   /**
-   * Convert ksm.db to sqlite db file. With following schema.
+   * Convert om.db to sqlite db file. With following schema.
    * (* for primary key)
    *
    * 1. for key type USER, it contains a username and a list volumes
@@ -341,8 +341,8 @@ public class SQLCLI  extends Configured implements Tool {
    * @param outPath
    * @throws Exception
    */
-  private void convertKSMDB(Path dbPath, Path outPath) throws Exception {
-    LOG.info("Create tables for sql ksm db.");
+  private void convertOMDB(Path dbPath, Path outPath) throws Exception {
+    LOG.info("Create tables for sql om db.");
     File dbFile = dbPath.toFile();
     try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
         .setConf(conf).setDbFile(dbFile).build();
@@ -357,7 +357,7 @@ public class SQLCLI  extends Configured implements Tool {
         String keyString = DFSUtilClient.bytes2String(key);
         KeyType type = getKeyType(keyString);
         try {
-          insertKSMDB(conn, type, keyString, value);
+          insertOMDB(conn, type, keyString, value);
         } catch (IOException | SQLException ex) {
           LOG.error("Exception inserting key {} type {}", keyString, type, ex);
         }
@@ -366,8 +366,8 @@ public class SQLCLI  extends Configured implements Tool {
     }
   }
 
-  private void insertKSMDB(Connection conn, KeyType type, String keyName,
-      byte[] value) throws IOException, SQLException {
+  private void insertOMDB(Connection conn, KeyType type, String keyName,
+                          byte[] value) throws IOException, SQLException {
     switch (type) {
     case USER:
       VolumeList volumeList = VolumeList.parseFrom(value);
@@ -412,16 +412,16 @@ public class SQLCLI  extends Configured implements Tool {
       executeSQL(conn, insertKeyInfo);
       break;
     default:
-      throw new IOException("Unknown key from ksm.db");
+      throw new IOException("Unknown key from om.db");
     }
   }
 
   private KeyType getKeyType(String key) {
-    if (key.startsWith(KSM_USER_PREFIX)) {
+    if (key.startsWith(OM_USER_PREFIX)) {
       return KeyType.USER;
-    } else if (key.startsWith(KSM_VOLUME_PREFIX)) {
-      return key.replaceFirst(KSM_VOLUME_PREFIX, "")
-          .contains(KSM_BUCKET_PREFIX) ? KeyType.BUCKET : KeyType.VOLUME;
+    } else if (key.startsWith(OM_VOLUME_PREFIX)) {
+      return key.replaceFirst(OM_VOLUME_PREFIX, "")
+          .contains(OM_BUCKET_PREFIX) ? KeyType.BUCKET : KeyType.VOLUME;
     }else {
       return KeyType.KEY;
     }




[21/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java
deleted file mode 100644
index bf7d870..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java
+++ /dev/null
@@ -1,306 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-
-import java.io.IOException;
-
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.test.Whitebox;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-/**
- * Test for KSM metrics.
- */
-public class TestKSMMetrcis {
-  private MiniOzoneCluster cluster;
-  private KeySpaceManager ksmManager;
-
-  /**
-   * The exception used for testing failure metrics.
-   */
-  private IOException exception = new IOException();
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   *
-   * @throws IOException
-   */
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    ksmManager = cluster.getKeySpaceManager();
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testVolumeOps() throws IOException {
-    VolumeManager volumeManager = (VolumeManager) Whitebox
-        .getInternalState(ksmManager, "volumeManager");
-    VolumeManager mockVm = Mockito.spy(volumeManager);
-
-    Mockito.doNothing().when(mockVm).createVolume(null);
-    Mockito.doNothing().when(mockVm).deleteVolume(null);
-    Mockito.doReturn(null).when(mockVm).getVolumeInfo(null);
-    Mockito.doReturn(true).when(mockVm).checkVolumeAccess(null, null);
-    Mockito.doNothing().when(mockVm).setOwner(null, null);
-    Mockito.doReturn(null).when(mockVm).listVolumes(null, null, null, 0);
-
-    Whitebox.setInternalState(ksmManager, "volumeManager", mockVm);
-    doVolumeOps();
-
-    MetricsRecordBuilder ksmMetrics = getMetrics("KSMMetrics");
-    assertCounter("NumVolumeOps", 6L, ksmMetrics);
-    assertCounter("NumVolumeCreates", 1L, ksmMetrics);
-    assertCounter("NumVolumeUpdates", 1L, ksmMetrics);
-    assertCounter("NumVolumeInfos", 1L, ksmMetrics);
-    assertCounter("NumVolumeCheckAccesses", 1L, ksmMetrics);
-    assertCounter("NumVolumeDeletes", 1L, ksmMetrics);
-    assertCounter("NumVolumeLists", 1L, ksmMetrics);
-
-    // inject exception to test for Failure Metrics
-    Mockito.doThrow(exception).when(mockVm).createVolume(null);
-    Mockito.doThrow(exception).when(mockVm).deleteVolume(null);
-    Mockito.doThrow(exception).when(mockVm).getVolumeInfo(null);
-    Mockito.doThrow(exception).when(mockVm).checkVolumeAccess(null, null);
-    Mockito.doThrow(exception).when(mockVm).setOwner(null, null);
-    Mockito.doThrow(exception).when(mockVm).listVolumes(null, null, null, 0);
-
-    Whitebox.setInternalState(ksmManager, "volumeManager", mockVm);
-    doVolumeOps();
-
-    ksmMetrics = getMetrics("KSMMetrics");
-    assertCounter("NumVolumeOps", 12L, ksmMetrics);
-    assertCounter("NumVolumeCreates", 2L, ksmMetrics);
-    assertCounter("NumVolumeUpdates", 2L, ksmMetrics);
-    assertCounter("NumVolumeInfos", 2L, ksmMetrics);
-    assertCounter("NumVolumeCheckAccesses", 2L, ksmMetrics);
-    assertCounter("NumVolumeDeletes", 2L, ksmMetrics);
-    assertCounter("NumVolumeLists", 2L, ksmMetrics);
-
-    assertCounter("NumVolumeCreateFails", 1L, ksmMetrics);
-    assertCounter("NumVolumeUpdateFails", 1L, ksmMetrics);
-    assertCounter("NumVolumeInfoFails", 1L, ksmMetrics);
-    assertCounter("NumVolumeCheckAccessFails", 1L, ksmMetrics);
-    assertCounter("NumVolumeDeleteFails", 1L, ksmMetrics);
-    assertCounter("NumVolumeListFails", 1L, ksmMetrics);
-  }
-
-  @Test
-  public void testBucketOps() throws IOException {
-    BucketManager bucketManager = (BucketManager) Whitebox
-        .getInternalState(ksmManager, "bucketManager");
-    BucketManager mockBm = Mockito.spy(bucketManager);
-
-    Mockito.doNothing().when(mockBm).createBucket(null);
-    Mockito.doNothing().when(mockBm).deleteBucket(null, null);
-    Mockito.doReturn(null).when(mockBm).getBucketInfo(null, null);
-    Mockito.doNothing().when(mockBm).setBucketProperty(null);
-    Mockito.doReturn(null).when(mockBm).listBuckets(null, null, null, 0);
-
-    Whitebox.setInternalState(ksmManager, "bucketManager", mockBm);
-    doBucketOps();
-
-    MetricsRecordBuilder ksmMetrics = getMetrics("KSMMetrics");
-    assertCounter("NumBucketOps", 5L, ksmMetrics);
-    assertCounter("NumBucketCreates", 1L, ksmMetrics);
-    assertCounter("NumBucketUpdates", 1L, ksmMetrics);
-    assertCounter("NumBucketInfos", 1L, ksmMetrics);
-    assertCounter("NumBucketDeletes", 1L, ksmMetrics);
-    assertCounter("NumBucketLists", 1L, ksmMetrics);
-
-    // inject exception to test for Failure Metrics
-    Mockito.doThrow(exception).when(mockBm).createBucket(null);
-    Mockito.doThrow(exception).when(mockBm).deleteBucket(null, null);
-    Mockito.doThrow(exception).when(mockBm).getBucketInfo(null, null);
-    Mockito.doThrow(exception).when(mockBm).setBucketProperty(null);
-    Mockito.doThrow(exception).when(mockBm).listBuckets(null, null, null, 0);
-
-    Whitebox.setInternalState(ksmManager, "bucketManager", mockBm);
-    doBucketOps();
-
-    ksmMetrics = getMetrics("KSMMetrics");
-    assertCounter("NumBucketOps", 10L, ksmMetrics);
-    assertCounter("NumBucketCreates", 2L, ksmMetrics);
-    assertCounter("NumBucketUpdates", 2L, ksmMetrics);
-    assertCounter("NumBucketInfos", 2L, ksmMetrics);
-    assertCounter("NumBucketDeletes", 2L, ksmMetrics);
-    assertCounter("NumBucketLists", 2L, ksmMetrics);
-
-    assertCounter("NumBucketCreateFails", 1L, ksmMetrics);
-    assertCounter("NumBucketUpdateFails", 1L, ksmMetrics);
-    assertCounter("NumBucketInfoFails", 1L, ksmMetrics);
-    assertCounter("NumBucketDeleteFails", 1L, ksmMetrics);
-    assertCounter("NumBucketListFails", 1L, ksmMetrics);
-  }
-
-  @Test
-  public void testKeyOps() throws IOException {
-    KeyManager bucketManager = (KeyManager) Whitebox
-        .getInternalState(ksmManager, "keyManager");
-    KeyManager mockKm = Mockito.spy(bucketManager);
-
-    Mockito.doReturn(null).when(mockKm).openKey(null);
-    Mockito.doNothing().when(mockKm).deleteKey(null);
-    Mockito.doReturn(null).when(mockKm).lookupKey(null);
-    Mockito.doReturn(null).when(mockKm).listKeys(null, null, null, null, 0);
-
-    Whitebox.setInternalState(ksmManager, "keyManager", mockKm);
-    doKeyOps();
-
-    MetricsRecordBuilder ksmMetrics = getMetrics("KSMMetrics");
-    assertCounter("NumKeyOps", 4L, ksmMetrics);
-    assertCounter("NumKeyAllocate", 1L, ksmMetrics);
-    assertCounter("NumKeyLookup", 1L, ksmMetrics);
-    assertCounter("NumKeyDeletes", 1L, ksmMetrics);
-    assertCounter("NumKeyLists", 1L, ksmMetrics);
-
-    // inject exception to test for Failure Metrics
-    Mockito.doThrow(exception).when(mockKm).openKey(null);
-    Mockito.doThrow(exception).when(mockKm).deleteKey(null);
-    Mockito.doThrow(exception).when(mockKm).lookupKey(null);
-    Mockito.doThrow(exception).when(mockKm).listKeys(
-        null, null, null, null, 0);
-
-    Whitebox.setInternalState(ksmManager, "keyManager", mockKm);
-    doKeyOps();
-
-    ksmMetrics = getMetrics("KSMMetrics");
-    assertCounter("NumKeyOps", 8L, ksmMetrics);
-    assertCounter("NumKeyAllocate", 2L, ksmMetrics);
-    assertCounter("NumKeyLookup", 2L, ksmMetrics);
-    assertCounter("NumKeyDeletes", 2L, ksmMetrics);
-    assertCounter("NumKeyLists", 2L, ksmMetrics);
-
-    assertCounter("NumKeyAllocateFails", 1L, ksmMetrics);
-    assertCounter("NumKeyLookupFails", 1L, ksmMetrics);
-    assertCounter("NumKeyDeleteFails", 1L, ksmMetrics);
-    assertCounter("NumKeyListFails", 1L, ksmMetrics);
-  }
-
-  /**
-   * Test volume operations with ignoring thrown exception.
-   */
-  private void doVolumeOps() {
-    try {
-      ksmManager.createVolume(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.deleteVolume(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.getVolumeInfo(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.checkVolumeAccess(null, null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.setOwner(null, null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.listAllVolumes(null, null, 0);
-    } catch (IOException ignored) {
-    }
-  }
-
-  /**
-   * Test bucket operations with ignoring thrown exception.
-   */
-  private void doBucketOps() {
-    try {
-      ksmManager.createBucket(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.deleteBucket(null, null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.getBucketInfo(null, null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.setBucketProperty(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.listBuckets(null, null, null, 0);
-    } catch (IOException ignored) {
-    }
-  }
-
-  /**
-   * Test key operations with ignoring thrown exception.
-   */
-  private void doKeyOps() {
-    try {
-      ksmManager.openKey(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.deleteKey(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.lookupKey(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.listKeys(null, null, null, null, 0);
-    } catch (IOException ignored) {
-    }
-  }
-}
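
The deleted metrics test above drives each manager through a Mockito spy swapped into the KeySpaceManager via Whitebox.setInternalState, first with stubbed successes and then with stubbed failures, asserting the op and *Fails counters after each pass. A minimal standalone sketch of that spy-and-stub idiom (the class and method here are illustrative stand-ins, not the real Ozone interfaces):

import org.mockito.Mockito;

public class SpyStubSketch {

  // Illustrative stand-in for a manager collaborator.
  static class VolumeManager {
    void createVolume(String name) {
      // real work elided
    }
  }

  public static void main(String[] args) {
    VolumeManager spy = Mockito.spy(new VolumeManager());

    // First pass: stub the call to succeed, so the success counter would increment.
    Mockito.doNothing().when(spy).createVolume(null);
    spy.createVolume(null);

    // Second pass: stub the same call to throw, so the failure counter would increment.
    Mockito.doThrow(new RuntimeException()).when(spy).createVolume(null);
    try {
      spy.createVolume(null);
    } catch (RuntimeException ignored) {
      // the real test swallows the exception and then asserts the *Fails metrics
    }
  }
}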

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java
deleted file mode 100644
index 7b92ec7..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java
+++ /dev/null
@@ -1,284 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.scm.cli.SQLCLI;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.UUID;
-
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * This class tests the CLI that transforms ksm.db into SQLite DB files.
- */
-@RunWith(Parameterized.class)
-public class TestKSMSQLCli {
-  private MiniOzoneCluster cluster = null;
-  private StorageHandler storageHandler;
-  private UserArgs userArgs;
-  private OzoneConfiguration conf;
-  private SQLCLI cli;
-
-  private String userName = "userTest";
-  private String adminName = "adminTest";
-  private String volumeName0 = "volumeTest0";
-  private String volumeName1 = "volumeTest1";
-  private String bucketName0 = "bucketTest0";
-  private String bucketName1 = "bucketTest1";
-  private String bucketName2 = "bucketTest2";
-  private String keyName0 = "key0";
-  private String keyName1 = "key1";
-  private String keyName2 = "key2";
-  private String keyName3 = "key3";
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {
-        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
-        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
-    });
-  }
-
-  private String metaStoreType;
-
-  public TestKSMSQLCli(String type) {
-    metaStoreType = type;
-  }
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @Before
-  public void setup() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    cluster.waitForClusterToBeReady();
-
-    VolumeArgs createVolumeArgs0 = new VolumeArgs(volumeName0, userArgs);
-    createVolumeArgs0.setUserName(userName);
-    createVolumeArgs0.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs0);
-    VolumeArgs createVolumeArgs1 = new VolumeArgs(volumeName1, userArgs);
-    createVolumeArgs1.setUserName(userName);
-    createVolumeArgs1.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs1);
-
-    BucketArgs bucketArgs0 = new BucketArgs(volumeName0, bucketName0, userArgs);
-    storageHandler.createBucket(bucketArgs0);
-    BucketArgs bucketArgs1 = new BucketArgs(volumeName1, bucketName1, userArgs);
-    storageHandler.createBucket(bucketArgs1);
-    BucketArgs bucketArgs2 = new BucketArgs(volumeName0, bucketName2, userArgs);
-    storageHandler.createBucket(bucketArgs2);
-
-    KeyArgs keyArgs0 =
-        new KeyArgs(volumeName0, bucketName0, keyName0, userArgs);
-    keyArgs0.setSize(100);
-    KeyArgs keyArgs1 =
-        new KeyArgs(volumeName1, bucketName1, keyName1, userArgs);
-    keyArgs1.setSize(200);
-    KeyArgs keyArgs2 =
-        new KeyArgs(volumeName0, bucketName2, keyName2, userArgs);
-    keyArgs2.setSize(300);
-    KeyArgs keyArgs3 =
-        new KeyArgs(volumeName0, bucketName2, keyName3, userArgs);
-    keyArgs3.setSize(400);
-
-    OutputStream stream = storageHandler.newKeyWriter(keyArgs0);
-    stream.close();
-    stream = storageHandler.newKeyWriter(keyArgs1);
-    stream.close();
-    stream = storageHandler.newKeyWriter(keyArgs2);
-    stream.close();
-    stream = storageHandler.newKeyWriter(keyArgs3);
-    stream.close();
-
-    cluster.getKeySpaceManager().stop();
-    cluster.getStorageContainerManager().stop();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType);
-    cli = new SQLCLI(conf);
-  }
-
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testKSMDB() throws Exception {
-    String dbOutPath =  GenericTestUtils.getTempPath(
-        UUID.randomUUID() + "/out_sql.db");
-
-    String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
-    String dbPath = dbRootPath + "/" + KSM_DB_NAME;
-    String[] args = {"-p", dbPath, "-o", dbOutPath};
-
-    cli.run(args);
-
-    Connection conn = connectDB(dbOutPath);
-    String sql = "SELECT * FROM volumeList";
-    ResultSet rs = executeQuery(conn, sql);
-    List<String> expectedValues =
-        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
-    while (rs.next()) {
-      String userNameRs = rs.getString("userName");
-      String volumeNameRs = rs.getString("volumeName");
-      assertEquals(userName,  userNameRs.substring(1));
-      assertTrue(expectedValues.remove(volumeNameRs));
-    }
-    assertEquals(0, expectedValues.size());
-
-    sql = "SELECT * FROM volumeInfo";
-    rs = executeQuery(conn, sql);
-    expectedValues =
-        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
-    while (rs.next()) {
-      String adName = rs.getString("adminName");
-      String ownerName = rs.getString("ownerName");
-      String volumeName = rs.getString("volumeName");
-      assertEquals(adminName, adName);
-      assertEquals(userName, ownerName);
-      assertTrue(expectedValues.remove(volumeName));
-    }
-    assertEquals(0, expectedValues.size());
-
-    sql = "SELECT * FROM aclInfo";
-    rs = executeQuery(conn, sql);
-    expectedValues =
-        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
-    while (rs.next()) {
-      String adName = rs.getString("adminName");
-      String ownerName = rs.getString("ownerName");
-      String volumeName = rs.getString("volumeName");
-      String type = rs.getString("type");
-      String uName = rs.getString("userName");
-      String rights = rs.getString("rights");
-      assertEquals(adminName, adName);
-      assertEquals(userName, ownerName);
-      assertEquals("USER", type);
-      assertEquals(userName, uName);
-      assertEquals("READ_WRITE", rights);
-      assertTrue(expectedValues.remove(volumeName));
-    }
-    assertEquals(0, expectedValues.size());
-
-    sql = "SELECT * FROM bucketInfo";
-    rs = executeQuery(conn, sql);
-    HashMap<String, String> expectedMap = new HashMap<>();
-    expectedMap.put(bucketName0, volumeName0);
-    expectedMap.put(bucketName2, volumeName0);
-    expectedMap.put(bucketName1, volumeName1);
-    while (rs.next()) {
-      String volumeName = rs.getString("volumeName");
-      String bucketName = rs.getString("bucketName");
-      boolean versionEnabled = rs.getBoolean("versionEnabled");
-      String storegeType = rs.getString("storageType");
-      assertEquals(volumeName, expectedMap.remove(bucketName));
-      assertFalse(versionEnabled);
-      assertEquals("DISK", storegeType);
-    }
-    assertEquals(0, expectedMap.size());
-
-    sql = "SELECT * FROM keyInfo";
-    rs = executeQuery(conn, sql);
-    HashMap<String, List<String>> expectedMap2 = new HashMap<>();
-    // no data written, data size will be 0
-    expectedMap2.put(keyName0,
-        Arrays.asList(volumeName0, bucketName0, "0"));
-    expectedMap2.put(keyName1,
-        Arrays.asList(volumeName1, bucketName1, "0"));
-    expectedMap2.put(keyName2,
-        Arrays.asList(volumeName0, bucketName2, "0"));
-    expectedMap2.put(keyName3,
-        Arrays.asList(volumeName0, bucketName2, "0"));
-    while (rs.next()) {
-      String volumeName = rs.getString("volumeName");
-      String bucketName = rs.getString("bucketName");
-      String keyName = rs.getString("keyName");
-      int dataSize = rs.getInt("dataSize");
-      List<String> vals = expectedMap2.remove(keyName);
-      assertNotNull(vals);
-      assertEquals(vals.get(0), volumeName);
-      assertEquals(vals.get(1), bucketName);
-      assertEquals(vals.get(2), Integer.toString(dataSize));
-    }
-    assertEquals(0, expectedMap2.size());
-
-    conn.close();
-    Files.delete(Paths.get(dbOutPath));
-  }
-
-  private ResultSet executeQuery(Connection conn, String sql)
-      throws SQLException {
-    Statement stmt = conn.createStatement();
-    return stmt.executeQuery(sql);
-  }
-
-  private Connection connectDB(String dbPath) throws Exception {
-    Class.forName("org.sqlite.JDBC");
-    String connectPath =
-        String.format("jdbc:sqlite:%s", dbPath);
-    return DriverManager.getConnection(connectPath);
-  }
-}
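
The deleted CLI test above checks the converted SQLite file by loading the org.sqlite.JDBC driver and walking plain result sets (its connectDB and executeQuery helpers). A minimal standalone sketch of that verification idiom, assuming a hypothetical output path and the volumeList table produced by the conversion:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SqliteCheckSketch {

  public static void main(String[] args) throws Exception {
    Class.forName("org.sqlite.JDBC");                             // sqlite-jdbc driver on the classpath
    String dbOutPath = args.length > 0 ? args[0] : "out_sql.db";  // hypothetical output path
    try (Connection conn =
             DriverManager.getConnection("jdbc:sqlite:" + dbOutPath);
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery("SELECT * FROM volumeList")) {
      while (rs.next()) {
        // Print each user/volume row, as the deleted test did with its assertions.
        System.out.println(rs.getString("userName") + " -> "
            + rs.getString("volumeName"));
      }
    }
  }
}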

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java
deleted file mode 100644
index 8a16bfe..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java
+++ /dev/null
@@ -1,1350 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.hdds.scm.server.SCMStorage;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServicePort;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.apache.hadoop.ozone.web.response.BucketInfo;
-import org.apache.hadoop.ozone.web.response.KeyInfo;
-import org.apache.hadoop.ozone.web.response.VolumeInfo;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.Status;
-import org.apache.hadoop.ozone.web.handlers.ListArgs;
-import org.apache.hadoop.ozone.web.response.ListBuckets;
-import org.apache.hadoop.ozone.web.response.ListKeys;
-import org.apache.hadoop.ozone.web.response.ListVolumes;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BackgroundService;
-import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataStore;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.net.InetSocketAddress;
-import java.text.ParseException;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.List;
-import java.util.UUID;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
-import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CLIENT_ADDRESS_KEY;
-
-/**
- * Test Key Space Manager operation in distributed handler scenario.
- */
-public class TestKeySpaceManager {
-  private static MiniOzoneCluster cluster = null;
-  private static StorageHandler storageHandler;
-  private static UserArgs userArgs;
-  private static KSMMetrics ksmMetrics;
-  private static OzoneConfiguration conf;
-  private static String clusterId;
-  private static String scmId;
-  private static String ksmId;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    ksmId = UUID.randomUUID().toString();
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
-    cluster =  MiniOzoneCluster.newBuilder(conf)
-        .setClusterId(clusterId)
-        .setScmId(scmId)
-        .setKsmId(ksmId)
-        .build();
-    cluster.waitForClusterToBeReady();
-    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    ksmMetrics = cluster.getKeySpaceManager().getMetrics();
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  // Create a volume and test its attribute after creating them
-  @Test(timeout = 60000)
-  public void testCreateVolume() throws IOException, OzoneException {
-    long volumeCreateFailCount = ksmMetrics.getNumVolumeCreateFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    VolumeInfo retVolumeinfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertTrue(retVolumeinfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(retVolumeinfo.getOwner().getName().equals(userName));
-    Assert.assertEquals(volumeCreateFailCount,
-        ksmMetrics.getNumVolumeCreateFails());
-  }
-
-  // Create a volume and modify the volume owner and then test its attributes
-  @Test(timeout = 60000)
-  public void testChangeVolumeOwner() throws IOException, OzoneException {
-    long volumeCreateFailCount = ksmMetrics.getNumVolumeCreateFails();
-    long volumeInfoFailCount = ksmMetrics.getNumVolumeInfoFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    String newUserName = "user" + RandomStringUtils.randomNumeric(5);
-    createVolumeArgs.setUserName(newUserName);
-    storageHandler.setVolumeOwner(createVolumeArgs);
-
-    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-
-    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
-    Assert.assertFalse(retVolumeInfo.getOwner().getName().equals(userName));
-    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(newUserName));
-    Assert.assertEquals(volumeCreateFailCount,
-        ksmMetrics.getNumVolumeCreateFails());
-    Assert.assertEquals(volumeInfoFailCount,
-        ksmMetrics.getNumVolumeInfoFails());
-  }
-
-  // Create a volume and modify the volume owner and then test its attributes
-  @Test(timeout = 60000)
-  public void testChangeVolumeQuota() throws IOException, OzoneException {
-    long numVolumeCreateFail = ksmMetrics.getNumVolumeCreateFails();
-    long numVolumeInfoFail = ksmMetrics.getNumVolumeInfoFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    Random rand = new Random();
-
-    // Create a new volume with a quota
-    OzoneQuota createQuota =
-        new OzoneQuota(rand.nextInt(100), OzoneQuota.Units.GB);
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    createVolumeArgs.setQuota(createQuota);
-    storageHandler.createVolume(createVolumeArgs);
-
-    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertEquals(createQuota.sizeInBytes(),
-        retVolumeInfo.getQuota().sizeInBytes());
-
-    // Set a new quota and test it
-    OzoneQuota setQuota =
-        new OzoneQuota(rand.nextInt(100), OzoneQuota.Units.GB);
-    createVolumeArgs.setQuota(setQuota);
-    storageHandler.setVolumeQuota(createVolumeArgs, false);
-    getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertEquals(setQuota.sizeInBytes(),
-        retVolumeInfo.getQuota().sizeInBytes());
-
-    // Remove the quota and test it again
-    storageHandler.setVolumeQuota(createVolumeArgs, true);
-    getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertEquals(OzoneConsts.MAX_QUOTA_IN_BYTES,
-        retVolumeInfo.getQuota().sizeInBytes());
-    Assert.assertEquals(numVolumeCreateFail,
-        ksmMetrics.getNumVolumeCreateFails());
-    Assert.assertEquals(numVolumeInfoFail,
-        ksmMetrics.getNumVolumeInfoFails());
-  }
-
-  // Create a volume and then delete it and then check for deletion
-  @Test(timeout = 60000)
-  public void testDeleteVolume() throws IOException, OzoneException {
-    long volumeCreateFailCount = ksmMetrics.getNumVolumeCreateFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String volumeName1 = volumeName + "_A";
-    String volumeName2 = volumeName + "_AA";
-    VolumeArgs volumeArgs = null;
-    VolumeInfo volumeInfo = null;
-
-    // Create 2 empty volumes with same prefix.
-    volumeArgs = new VolumeArgs(volumeName1, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-
-    volumeArgs = new VolumeArgs(volumeName2, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-
-    volumeArgs  = new VolumeArgs(volumeName1, userArgs);
-    volumeInfo = storageHandler.getVolumeInfo(volumeArgs);
-    Assert.assertTrue(volumeInfo.getVolumeName().equals(volumeName1));
-    Assert.assertTrue(volumeInfo.getOwner().getName().equals(userName));
-    Assert.assertEquals(volumeCreateFailCount,
-        ksmMetrics.getNumVolumeCreateFails());
-
-    // Volume with _A should be able to delete as it is empty.
-    storageHandler.deleteVolume(volumeArgs);
-
-    // Make sure volume with _AA suffix still exists.
-    volumeArgs = new VolumeArgs(volumeName2, userArgs);
-    volumeInfo = storageHandler.getVolumeInfo(volumeArgs);
-    Assert.assertTrue(volumeInfo.getVolumeName().equals(volumeName2));
-
-    // Make sure volume with _A suffix is successfully deleted.
-    exception.expect(IOException.class);
-    exception.expectMessage("Info Volume failed, error:VOLUME_NOT_FOUND");
-    volumeArgs = new VolumeArgs(volumeName1, userArgs);
-    storageHandler.getVolumeInfo(volumeArgs);
-  }
-
-  // Create a volume and a bucket inside the volume,
-  // then delete it and then check for deletion failure
-  @Test(timeout = 60000)
-  public void testFailedDeleteVolume() throws IOException, OzoneException {
-    long numVolumeCreateFails = ksmMetrics.getNumVolumeCreateFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(userName));
-    Assert.assertEquals(numVolumeCreateFails,
-        ksmMetrics.getNumVolumeCreateFails());
-
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    try {
-      storageHandler.deleteVolume(createVolumeArgs);
-      Assert.fail("Expecting deletion should fail "
-          + "because volume is not empty");
-    } catch (IOException ex) {
-      Assert.assertEquals(ex.getMessage(),
-          "Delete Volume failed, error:VOLUME_NOT_EMPTY");
-    }
-    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(userName));
-  }
-
-  // Create a volume and test Volume access for a different user
-  @Test(timeout = 60000)
-  public void testAccessVolume() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String[] groupName =
-        {"group" + RandomStringUtils.randomNumeric(5)};
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    createVolumeArgs.setGroups(groupName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, userName,
-        OzoneAcl.OzoneACLRights.READ_WRITE);
-    Assert.assertTrue(storageHandler.checkVolumeAccess(volumeName, userAcl));
-    OzoneAcl group = new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, groupName[0],
-        OzoneAcl.OzoneACLRights.READ);
-    Assert.assertTrue(storageHandler.checkVolumeAccess(volumeName, group));
-
-    // Create a different user and access should fail
-    String falseUserName = "user" + RandomStringUtils.randomNumeric(5);
-    OzoneAcl falseUserAcl =
-        new OzoneAcl(OzoneAcl.OzoneACLType.USER, falseUserName,
-            OzoneAcl.OzoneACLRights.READ_WRITE);
-    Assert.assertFalse(storageHandler
-        .checkVolumeAccess(volumeName, falseUserAcl));
-    // Checking access with user name and Group Type should fail
-    OzoneAcl falseGroupAcl = new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, userName,
-        OzoneAcl.OzoneACLRights.READ_WRITE);
-    Assert.assertFalse(storageHandler
-        .checkVolumeAccess(volumeName, falseGroupAcl));
-
-    // Access for acl type world should also fail
-    OzoneAcl worldAcl =
-        new OzoneAcl(OzoneAcl.OzoneACLType.WORLD, "",
-            OzoneAcl.OzoneACLRights.READ);
-    Assert.assertFalse(storageHandler.checkVolumeAccess(volumeName, worldAcl));
-
-    Assert.assertEquals(0, ksmMetrics.getNumVolumeCheckAccessFails());
-    Assert.assertEquals(0, ksmMetrics.getNumVolumeCreateFails());
-  }
-
-  @Test(timeout = 60000)
-  public void testCreateBucket() throws IOException, OzoneException {
-    long numVolumeCreateFail = ksmMetrics.getNumVolumeCreateFails();
-    long numBucketCreateFail = ksmMetrics.getNumBucketCreateFails();
-    long numBucketInfoFail = ksmMetrics.getNumBucketInfoFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    Assert.assertEquals(numVolumeCreateFail,
-        ksmMetrics.getNumVolumeCreateFails());
-    Assert.assertEquals(numBucketCreateFail,
-        ksmMetrics.getNumBucketCreateFails());
-    Assert.assertEquals(numBucketInfoFail,
-        ksmMetrics.getNumBucketInfoFails());
-  }
-
-  @Test(timeout = 60000)
-  public void testDeleteBucket() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    storageHandler.deleteBucket(bucketArgs);
-    exception.expect(IOException.class);
-    exception.expectMessage("Info Bucket failed, error: BUCKET_NOT_FOUND");
-    storageHandler.getBucketInfo(getBucketArgs);
-  }
-
-  @Test(timeout = 60000)
-  public void testDeleteNonExistingBucket() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    BucketArgs newBucketArgs = new BucketArgs(
-        volumeName, bucketName + "_invalid", userArgs);
-    exception.expect(IOException.class);
-    exception.expectMessage("Delete Bucket failed, error:BUCKET_NOT_FOUND");
-    storageHandler.deleteBucket(newBucketArgs);
-  }
-
-
-  @Test(timeout = 60000)
-  public void testDeleteNonEmptyBucket() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    String dataString = RandomStringUtils.randomAscii(100);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    exception.expect(IOException.class);
-    exception.expectMessage("Delete Bucket failed, error:BUCKET_NOT_EMPTY");
-    storageHandler.deleteBucket(bucketArgs);
-  }
-
-  /**
-   * Basic test of both putKey and getKey from KSM, as one can not be tested
-   * without the other.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testGetKeyWriterReader() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyAllocates = ksmMetrics.getNumKeyAllocates();
-    long numKeyLookups = ksmMetrics.getNumKeyLookups();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString = RandomStringUtils.randomAscii(100);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    Assert.assertEquals(1 + numKeyAllocates, ksmMetrics.getNumKeyAllocates());
-
-    byte[] data = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
-    Assert.assertEquals(dataString, DFSUtil.bytes2String(data));
-    Assert.assertEquals(1 + numKeyLookups, ksmMetrics.getNumKeyLookups());
-  }
-
-  /**
-   * Test write the same key twice, the second write should fail, as currently
-   * key overwrite is not supported.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testKeyOverwrite() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyAllocateFails = ksmMetrics.getNumKeyAllocateFails();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(100);
-    String dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-
-    // We allow the key overwrite to be successful. Please note : Till
-    // HDFS-11922 is fixed this causes a data block leak on the data node side.
-    // That is this overwrite only overwrites the keys on KSM. We need to
-    // garbage collect those blocks from datanode.
-    KeyArgs keyArgs2 = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    storageHandler.newKeyWriter(keyArgs2);
-    Assert
-        .assertEquals(numKeyAllocateFails, ksmMetrics.getNumKeyAllocateFails());
-  }
-
-  /**
-   * Test get a non-exiting key.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testGetNonExistKey() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyLookupFails = ksmMetrics.getNumKeyLookupFails();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    // try to get the key, should fail as it hasn't been created
-    exception.expect(IOException.class);
-    exception.expectMessage("KEY_NOT_FOUND");
-    storageHandler.newKeyReader(keyArgs);
-    Assert.assertEquals(1 + numKeyLookupFails,
-        ksmMetrics.getNumKeyLookupFails());
-  }
-
-  /**
-   * Test delete keys for ksm.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testDeleteKey() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyDeletes = ksmMetrics.getNumKeyDeletes();
-    long numKeyDeleteFails = ksmMetrics.getNumKeyDeletesFails();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(100);
-    String dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-
-    storageHandler.deleteKey(keyArgs);
-    Assert.assertEquals(1 + numKeyDeletes, ksmMetrics.getNumKeyDeletes());
-
-    // Make sure the deleted key has been renamed under the deleting-key prefix.
-    MetadataStore store = cluster.getKeySpaceManager().
-        getMetadataManager().getStore();
-    List<Map.Entry<byte[], byte[]>> list = store.getRangeKVs(null, 10,
-        new MetadataKeyFilters.KeyPrefixFilter()
-            .addFilter(DELETING_KEY_PREFIX));
-    Assert.assertEquals(1, list.size());
-
-    // Delete the key again to test deleting non-existing key.
-    try {
-      storageHandler.deleteKey(keyArgs);
-      Assert.fail("Expected exception not thrown.");
-    } catch (IOException ioe) {
-      Assert.assertTrue(ioe.getMessage().contains("KEY_NOT_FOUND"));
-    }
-    Assert.assertEquals(1 + numKeyDeleteFails,
-        ksmMetrics.getNumKeyDeletesFails());
-  }
-
-  /**
-   * Test rename key for ksm.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testRenameKey() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyRenames = ksmMetrics.getNumKeyRenames();
-    long numKeyRenameFails = ksmMetrics.getNumKeyRenameFails();
-    int testRenameFails = 0;
-    int testRenames = 0;
-    IOException ioe = null;
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(100);
-    String toKeyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    // Rename from non-existent key should fail
-    try {
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (IOException e) {
-      testRenameFails++;
-      ioe = e;
-    }
-    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
-
-    // Write the contents of the key to be renamed
-    String dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-
-    // Rename the key
-    toKeyName = "key" + RandomStringUtils.randomNumeric(5);
-    testRenames++;
-    storageHandler.renameKey(keyArgs, toKeyName);
-    Assert.assertEquals(numKeyRenames + testRenames,
-        ksmMetrics.getNumKeyRenames());
-    Assert.assertEquals(numKeyRenameFails + testRenameFails,
-        ksmMetrics.getNumKeyRenameFails());
-
-    // Try to get the key, should fail as it has been renamed
-    try {
-      storageHandler.newKeyReader(keyArgs);
-    } catch (IOException e) {
-      ioe = e;
-    }
-    Assert.assertTrue(ioe.getMessage().contains("KEY_NOT_FOUND"));
-
-    // Verify the contents of the renamed key
-    keyArgs = new KeyArgs(toKeyName, bucketArgs);
-    InputStream in = storageHandler.newKeyReader(keyArgs);
-    byte[] b = new byte[dataString.getBytes().length];
-    in.read(b);
-    Assert.assertEquals(new String(b), dataString);
-
-    // Re-create the original key. Renaming it to a key that already exists should fail.
-    keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(100);
-    dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-      stream.close();
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (IOException e) {
-      testRenameFails++;
-      ioe = e;
-    }
-    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
-
-    // Rename to empty string should fail
-    toKeyName = "";
-    try {
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (IOException e) {
-      testRenameFails++;
-      ioe = e;
-    }
-    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
-
-    // Rename from empty string should fail
-    keyArgs = new KeyArgs("", bucketArgs);
-    toKeyName = "key" + RandomStringUtils.randomNumeric(5);
-    try {
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (IOException e) {
-      testRenameFails++;
-      ioe = e;
-    }
-    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
-
-    Assert.assertEquals(numKeyRenames + testRenames,
-        ksmMetrics.getNumKeyRenames());
-    Assert.assertEquals(numKeyRenameFails + testRenameFails,
-        ksmMetrics.getNumKeyRenameFails());
-  }
-
-  @Test(timeout = 60000)
-  public void testListBuckets() throws IOException, OzoneException {
-    ListBuckets result = null;
-    ListArgs listBucketArgs = null;
-
-    // Create volume - volA.
-    final String volAname = "volA";
-    VolumeArgs volAArgs = new VolumeArgs(volAname, userArgs);
-    volAArgs.setUserName("userA");
-    volAArgs.setAdminName("adminA");
-    storageHandler.createVolume(volAArgs);
-
-    // Create 20 buckets in volA for tests.
-    for (int i=0; i<10; i++) {
-      // Create "/volA/aBucket_0" to "/volA/aBucket_9" buckets in volA volume.
-      BucketArgs aBuckets = new BucketArgs(volAname,
-          "aBucket_" + i, userArgs);
-      if(i % 3 == 0) {
-        aBuckets.setStorageType(StorageType.ARCHIVE);
-      } else {
-        aBuckets.setStorageType(StorageType.DISK);
-      }
-      storageHandler.createBucket(aBuckets);
-
-      // Create "/volA/bBucket_0" to "/volA/bBucket_9" buckets in volA volume.
-      BucketArgs bBuckets = new BucketArgs(volAname,
-          "bBucket_" + i, userArgs);
-      if(i % 3 == 0) {
-        bBuckets.setStorageType(StorageType.RAM_DISK);
-      } else {
-        bBuckets.setStorageType(StorageType.SSD);
-      }
-      storageHandler.createBucket(bBuckets);
-    }
-
-    VolumeArgs volArgs = new VolumeArgs(volAname, userArgs);
-
-    // List all buckets in volA.
-    listBucketArgs = new ListArgs(volArgs, null, 100, null);
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(20, result.getBuckets().size());
-    List<BucketInfo> archiveBuckets = result.getBuckets().stream()
-        .filter(item -> item.getStorageType() == StorageType.ARCHIVE)
-        .collect(Collectors.toList());
-    Assert.assertEquals(4, archiveBuckets.size());
-
-    // List buckets with prefix "aBucket".
-    listBucketArgs = new ListArgs(volArgs, "aBucket", 100, null);
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(10, result.getBuckets().size());
-    Assert.assertTrue(result.getBuckets().stream()
-        .allMatch(entry -> entry.getBucketName().startsWith("aBucket")));
-
-    // List a certain number of buckets.
-    listBucketArgs = new ListArgs(volArgs, null, 3, null);
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(3, result.getBuckets().size());
-    Assert.assertEquals("aBucket_0",
-        result.getBuckets().get(0).getBucketName());
-    Assert.assertEquals("aBucket_1",
-        result.getBuckets().get(1).getBucketName());
-    Assert.assertEquals("aBucket_2",
-        result.getBuckets().get(2).getBucketName());
-
-    // List a certain number of buckets from the startKey.
-    listBucketArgs = new ListArgs(volArgs, null, 2, "bBucket_3");
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(2, result.getBuckets().size());
-    Assert.assertEquals("bBucket_4",
-        result.getBuckets().get(0).getBucketName());
-    Assert.assertEquals("bBucket_5",
-        result.getBuckets().get(1).getBucketName());
-
-    // Provide an invalid bucket name as start key.
-    listBucketArgs = new ListArgs(volArgs, null, 100, "unknown_bucket_name");
-    ListBuckets buckets = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(buckets.getBuckets().size(), 0);
-
-    // Use all arguments.
-    listBucketArgs = new ListArgs(volArgs, "b", 5, "bBucket_7");
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(2, result.getBuckets().size());
-    Assert.assertEquals("bBucket_8",
-        result.getBuckets().get(0).getBucketName());
-    Assert.assertEquals("bBucket_9",
-        result.getBuckets().get(1).getBucketName());
-
-    // Provide an invalid maxKeys argument.
-    try {
-      listBucketArgs = new ListArgs(volArgs, null, -1, null);
-      storageHandler.listBuckets(listBucketArgs);
-      Assert.fail("Expecting an error when the given"
-          + " maxKeys argument is invalid.");
-    } catch (Exception e) {
-      Assert.assertTrue(e.getMessage()
-          .contains(String.format("the value must be in range (0, %d]",
-              OzoneConsts.MAX_LISTBUCKETS_SIZE)));
-    }
-
-    // Provide an invalid volume name.
-    VolumeArgs invalidVolArgs = new VolumeArgs("invalid_name", userArgs);
-    try {
-      listBucketArgs = new ListArgs(invalidVolArgs, null, 100, null);
-      storageHandler.listBuckets(listBucketArgs);
-      Assert.fail("Expecting an error when the given volume name is invalid.");
-    } catch (Exception e) {
-      Assert.assertTrue(e instanceof IOException);
-      Assert.assertTrue(e.getMessage()
-          .contains(Status.VOLUME_NOT_FOUND.name()));
-    }
-  }
-
-  /**
-   * Test list keys.
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testListKeys() throws IOException, OzoneException {
-    ListKeys result = null;
-    ListArgs listKeyArgs = null;
-
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    // Write 20 keys in bucket.
-    int numKeys = 20;
-    String keyName = "Key";
-    KeyArgs keyArgs = null;
-    for (int i = 0; i < numKeys; i++) {
-      if (i % 2 == 0) {
-        // Create /volume/bucket/aKey[0,2,4,...,18] in bucket.
-        keyArgs = new KeyArgs("a" + keyName + i, bucketArgs);
-      } else {
-        // Create /volume/bucket/bKey[1,3,5,...,19] in bucket.
-        keyArgs = new KeyArgs("b" + keyName + i, bucketArgs);
-      }
-      keyArgs.setSize(4096);
-
-      // Just for testing list keys call, so no need to write real data.
-      OutputStream stream = storageHandler.newKeyWriter(keyArgs);
-      stream.close();
-    }
-
-    // List all keys in bucket.
-    bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    listKeyArgs = new ListArgs(bucketArgs, null, 100, null);
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(numKeys, result.getKeyList().size());
-
-    // List keys with prefix "aKey".
-    listKeyArgs = new ListArgs(bucketArgs, "aKey", 100, null);
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(numKeys / 2, result.getKeyList().size());
-    Assert.assertTrue(result.getKeyList().stream()
-        .allMatch(entry -> entry.getKeyName().startsWith("aKey")));
-
-    // List a certain number of keys.
-    listKeyArgs = new ListArgs(bucketArgs, null, 3, null);
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(3, result.getKeyList().size());
-    Assert.assertEquals("aKey0",
-        result.getKeyList().get(0).getKeyName());
-    Assert.assertEquals("aKey10",
-        result.getKeyList().get(1).getKeyName());
-    Assert.assertEquals("aKey12",
-        result.getKeyList().get(2).getKeyName());
-
-    // List a certain number of keys from the startKey.
-    listKeyArgs = new ListArgs(bucketArgs, null, 2, "bKey1");
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(2, result.getKeyList().size());
-    Assert.assertEquals("bKey11",
-        result.getKeyList().get(0).getKeyName());
-    Assert.assertEquals("bKey13",
-        result.getKeyList().get(1).getKeyName());
-
-    // Provide an invalid key name as start key.
-    listKeyArgs = new ListArgs(bucketArgs, null, 100, "invalid_start_key");
-    ListKeys keys = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(keys.getKeyList().size(), 0);
-
-    // Provide an invalid maxKeys argument.
-    try {
-      listKeyArgs = new ListArgs(bucketArgs, null, -1, null);
-      storageHandler.listBuckets(listKeyArgs);
-      Assert.fail("Expecting an error when the given"
-          + " maxKeys argument is invalid.");
-    } catch (Exception e) {
-      GenericTestUtils.assertExceptionContains(
-          String.format("the value must be in range (0, %d]",
-              OzoneConsts.MAX_LISTKEYS_SIZE), e);
-    }
-
-    // Provide an invalid bucket name.
-    bucketArgs = new BucketArgs("invalid_bucket", createVolumeArgs);
-    try {
-      listKeyArgs = new ListArgs(bucketArgs, null, numKeys, null);
-      storageHandler.listKeys(listKeyArgs);
-      Assert.fail(
-          "Expecting an error when the given bucket name is invalid.");
-    } catch (IOException e) {
-      GenericTestUtils.assertExceptionContains(
-          Status.BUCKET_NOT_FOUND.name(), e);
-    }
-  }
-
-  @Test
-  public void testListVolumes() throws IOException, OzoneException {
-
-    String user0 = "testListVolumes-user-0";
-    String user1 = "testListVolumes-user-1";
-    String adminUser = "testListVolumes-admin";
-    ListArgs listVolumeArgs;
-    ListVolumes volumes;
-
-    // Create 10 volumes for each of user0 and user1 (20 in total).
-    String[] user0vols = new String[10];
-    String[] user1vols = new String[10];
-    for (int i =0; i<10; i++) {
-      VolumeArgs createVolumeArgs;
-      String user0VolName = "Vol-" + user0 + "-" + i;
-      user0vols[i] = user0VolName;
-      createVolumeArgs = new VolumeArgs(user0VolName, userArgs);
-      createVolumeArgs.setUserName(user0);
-      createVolumeArgs.setAdminName(adminUser);
-      createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
-      storageHandler.createVolume(createVolumeArgs);
-
-      String user1VolName = "Vol-" + user1 + "-" + i;
-      user1vols[i] = user1VolName;
-      createVolumeArgs = new VolumeArgs(user1VolName, userArgs);
-      createVolumeArgs.setUserName(user1);
-      createVolumeArgs.setAdminName(adminUser);
-      createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
-      storageHandler.createVolume(createVolumeArgs);
-    }
-
-    // Test list all volumes
-    UserArgs userArgs0 = new UserArgs(user0, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    listVolumeArgs = new ListArgs(userArgs0, "Vol-testListVolumes", 100, null);
-    listVolumeArgs.setRootScan(true);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(20, volumes.getVolumes().size());
-
-    // Test listing all volumes that belong to a user
-    listVolumeArgs = new ListArgs(userArgs0, null, 100, null);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(10, volumes.getVolumes().size());
-
-    // Test prefix
-    listVolumeArgs = new ListArgs(userArgs0,
-        "Vol-" + user0 + "-3", 100, null);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(1, volumes.getVolumes().size());
-    Assert.assertEquals(user0vols[3],
-        volumes.getVolumes().get(0).getVolumeName());
-    Assert.assertEquals(user0,
-        volumes.getVolumes().get(0).getOwner().getName());
-
-    // Test list volumes by user
-    UserArgs userArgs1 = new UserArgs(user1, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    listVolumeArgs = new ListArgs(userArgs1, null, 100, null);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(10, volumes.getVolumes().size());
-    Assert.assertEquals(user1,
-        volumes.getVolumes().get(3).getOwner().getName());
-
-    // Make sure all available fields are returned
-    final String user0vol4 = "Vol-" + user0 + "-4";
-    final String user0vol5 = "Vol-" + user0 + "-5";
-    listVolumeArgs = new ListArgs(userArgs0, null, 1, user0vol4);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(1, volumes.getVolumes().size());
-    Assert.assertEquals(user0,
-        volumes.getVolumes().get(0).getOwner().getName());
-    Assert.assertEquals(user0vol5,
-        volumes.getVolumes().get(0).getVolumeName());
-    Assert.assertEquals(5,
-        volumes.getVolumes().get(0).getQuota().getSize());
-    Assert.assertEquals(OzoneQuota.Units.GB,
-        volumes.getVolumes().get(0).getQuota().getUnit());
-
-    // User doesn't have volumes
-    UserArgs userArgsX = new UserArgs("unknwonUser", OzoneUtils.getRequestID(),
-        null, null, null, null);
-    listVolumeArgs = new ListArgs(userArgsX, null, 100, null);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(0, volumes.getVolumes().size());
-  }
-
-  /**
-   * Test get key information.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testGetKeyInfo() throws IOException,
-      OzoneException, ParseException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    long currentTime = Time.now();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String keyName = "testKey";
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(4096);
-
-
-    OutputStream stream = storageHandler.newKeyWriter(keyArgs);
-    stream.close();
-
-    KeyInfo keyInfo = storageHandler.getKeyInfo(keyArgs);
-    // Compare the times at second granularity, since reparsing the date
-    // string to milliseconds loses precision.
-    Assert.assertTrue(
-        (HddsClientUtils.formatDateTime(keyInfo.getCreatedOn()) / 1000) >= (
-            currentTime / 1000));
-    Assert.assertTrue(
-        (HddsClientUtils.formatDateTime(keyInfo.getModifiedOn()) / 1000) >= (
-            currentTime / 1000));
-    Assert.assertEquals(keyName, keyInfo.getKeyName());
-    // Without any data written, the size should be 0.
-    Assert.assertEquals(0, keyInfo.getSize());
-  }
-
-  /**
-   * Test that the write can proceed without having to set the right size.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testWriteSize() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString = RandomStringUtils.randomAscii(100);
-    // write a key without specifying size at all
-    String keyName = "testKey";
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    byte[] data = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
-    Assert.assertEquals(dataString, DFSUtil.bytes2String(data));
-
-    // write a key with a declared size, but write more data than that size.
-    String keyName1 = "testKey1";
-    KeyArgs keyArgs1 = new KeyArgs(keyName1, bucketArgs);
-    keyArgs1.setSize(30);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs1)) {
-      stream.write(dataString.getBytes());
-    }
-    byte[] data1 = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs1)) {
-      in.read(data1);
-    }
-    Assert.assertEquals(dataString, DFSUtil.bytes2String(data1));
-  }
-
-  /**
-   * Tests the RPC call for getting scmId and clusterId from SCM.
-   * @throws IOException
-   */
-  @Test
-  public void testGetScmInfo() throws IOException {
-    ScmInfo info = cluster.getKeySpaceManager().getScmInfo();
-    Assert.assertEquals(clusterId, info.getClusterId());
-    Assert.assertEquals(scmId, info.getScmId());
-  }
-
-
-  @Test
-  public void testExpiredOpenKey() throws Exception {
-    BackgroundService openKeyCleanUpService = ((KeyManagerImpl)cluster
-        .getKeySpaceManager().getKeyManager()).getOpenKeyCleanupService();
-
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    // open some keys.
-
-    KeyArgs keyArgs1 = new KeyArgs("testKey1", bucketArgs);
-    KeyArgs keyArgs2 = new KeyArgs("testKey2", bucketArgs);
-    KeyArgs keyArgs3 = new KeyArgs("testKey3", bucketArgs);
-    KeyArgs keyArgs4 = new KeyArgs("testKey4", bucketArgs);
-    List<BlockGroup> openKeys;
-    storageHandler.newKeyWriter(keyArgs1);
-    storageHandler.newKeyWriter(keyArgs2);
-    storageHandler.newKeyWriter(keyArgs3);
-    storageHandler.newKeyWriter(keyArgs4);
-
-    Set<String> expected = Stream.of(
-        "testKey1", "testKey2", "testKey3", "testKey4")
-        .collect(Collectors.toSet());
-
-    // Now all k1-k4 should be in open state, so ExpiredOpenKeys should not
-    // contain these values.
-    openKeys = cluster.getKeySpaceManager()
-        .getMetadataManager().getExpiredOpenKeys();
-
-    for (BlockGroup bg : openKeys) {
-      String[] subs = bg.getGroupID().split("/");
-      String keyName = subs[subs.length - 1];
-      Assert.assertFalse(expected.contains(keyName));
-    }
-
-    Thread.sleep(2000);
-    // Now all k1-k4 should be in ExpiredOpenKeys
-    openKeys = cluster.getKeySpaceManager()
-        .getMetadataManager().getExpiredOpenKeys();
-    for (BlockGroup bg : openKeys) {
-      String[] subs = bg.getGroupID().split("/");
-      String keyName = subs[subs.length - 1];
-      if (expected.contains(keyName)) {
-        expected.remove(keyName);
-      }
-    }
-    Assert.assertEquals(0, expected.size());
-
-    KeyArgs keyArgs5 = new KeyArgs("testKey5", bucketArgs);
-    storageHandler.newKeyWriter(keyArgs5);
-
-    openKeyCleanUpService.triggerBackgroundTaskForTesting();
-    Thread.sleep(2000);
-    // now all k1-k4 should have been removed by the clean-up task, only k5
-    // should be present in ExpiredOpenKeys.
-    openKeys =
-        cluster.getKeySpaceManager().getMetadataManager().getExpiredOpenKeys();
-    System.out.println(openKeys);
-    boolean key5found = false;
-    Set<String> removed = Stream.of(
-        "testKey1", "testKey2", "testKey3", "testKey4")
-        .collect(Collectors.toSet());
-    for (BlockGroup bg : openKeys) {
-      String[] subs = bg.getGroupID().split("/");
-      String keyName = subs[subs.length - 1];
-      Assert.assertFalse(removed.contains(keyName));
-      if (keyName.equals("testKey5")) {
-        key5found = true;
-      }
-    }
-    Assert.assertTrue(key5found);
-  }
-
-  /**
-   * Tests the KSM Initialization.
-   * @throws IOException
-   */
-  @Test
-  public void testKSMInitialization() throws IOException {
-    // Read the version file info from KSM version file
-    KSMStorage ksmStorage = cluster.getKeySpaceManager().getKsmStorage();
-    SCMStorage scmStorage = new SCMStorage(conf);
-    // Assert that the cluster ID and SCM ID are properly set in the SCM
-    // version file.
-    Assert.assertEquals(clusterId, scmStorage.getClusterID());
-    Assert.assertEquals(scmId, scmStorage.getScmId());
-    // Assert that the KSM ID is properly set in the KSM version file.
-    Assert.assertEquals(ksmId, ksmStorage.getKsmId());
-    // Assert that the SCM info is correct in the KSM version file.
-    Assert.assertEquals(clusterId, ksmStorage.getClusterID());
-    Assert.assertEquals(scmId, ksmStorage.getScmId());
-  }
-
-  /**
-   * Tests the KSM Initialization Failure.
-   * @throws IOException
-   */
-  @Test
-  public void testKSMInitializationFailure() throws Exception {
-    OzoneConfiguration config = new OzoneConfiguration();
-    final String path =
-        GenericTestUtils.getTempPath(UUID.randomUUID().toString());
-    Path metaDirPath = Paths.get(path, "ksm-meta");
-    config.set(OzoneConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
-    config.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
-    config.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-    config.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
-        conf.get(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY));
-    exception.expect(KSMException.class);
-    exception.expectMessage("KSM not initialized.");
-    KeySpaceManager.createKSM(null, config);
-    KSMStorage ksmStore = new KSMStorage(config);
-    ksmStore.setClusterId("testClusterId");
-    ksmStore.setScmId("testScmId");
-    // writes the version file properties
-    ksmStore.initialize();
-    exception.expect(KSMException.class);
-    exception.expectMessage("SCM version info mismatch.");
-    KeySpaceManager.createKSM(null, conf);
-  }
-
-  @Test
-  public void testGetServiceList() throws IOException {
-    long numGetServiceListCalls = ksmMetrics.getNumGetServiceLists();
-    List<ServiceInfo> services = cluster.getKeySpaceManager().getServiceList();
-
-    Assert.assertEquals(numGetServiceListCalls + 1,
-        ksmMetrics.getNumGetServiceLists());
-
-    ServiceInfo ksmInfo = services.stream().filter(
-        a -> a.getNodeType().equals(HddsProtos.NodeType.KSM))
-        .collect(Collectors.toList()).get(0);
-    InetSocketAddress ksmAddress = new InetSocketAddress(ksmInfo.getHostname(),
-        ksmInfo.getPort(ServicePort.Type.RPC));
-    Assert.assertEquals(NetUtils.createSocketAddr(
-        conf.get(OZONE_KSM_ADDRESS_KEY)), ksmAddress);
-
-    ServiceInfo scmInfo = services.stream().filter(
-        a -> a.getNodeType().equals(HddsProtos.NodeType.SCM))
-        .collect(Collectors.toList()).get(0);
-    InetSocketAddress scmAddress = new InetSocketAddress(scmInfo.getHostname(),
-        scmInfo.getPort(ServicePort.Type.RPC));
-    Assert.assertEquals(NetUtils.createSocketAddr(
-        conf.get(OZONE_SCM_CLIENT_ADDRESS_KEY)), scmAddress);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java
deleted file mode 100644
index feb83d3..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.core.type.TypeReference;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServicePort;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.HttpClients;
-import org.apache.http.util.EntityUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.net.InetSocketAddress;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.ozone.KsmUtils.getKsmAddressForClients;
-
-/**
- * This class is to test the REST interface exposed by KeySpaceManager.
- */
-public class TestKeySpaceManagerRestInterface {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testGetServiceList() throws Exception {
-    KeySpaceManagerHttpServer server =
-        cluster.getKeySpaceManager().getHttpServer();
-    HttpClient client = HttpClients.createDefault();
-    String connectionUri = "http://" +
-        NetUtils.getHostPortString(server.getHttpAddress());
-    HttpGet httpGet = new HttpGet(connectionUri + "/serviceList");
-    HttpResponse response = client.execute(httpGet);
-    String serviceListJson = EntityUtils.toString(response.getEntity());
-
-    ObjectMapper objectMapper = new ObjectMapper();
-    TypeReference<List<ServiceInfo>> serviceInfoReference =
-        new TypeReference<List<ServiceInfo>>() {};
-    List<ServiceInfo> serviceInfos = objectMapper.readValue(
-        serviceListJson, serviceInfoReference);
-    Map<HddsProtos.NodeType, ServiceInfo> serviceMap = new HashMap<>();
-    for (ServiceInfo serviceInfo : serviceInfos) {
-      serviceMap.put(serviceInfo.getNodeType(), serviceInfo);
-    }
-
-    InetSocketAddress ksmAddress =
-        getKsmAddressForClients(conf);
-    ServiceInfo ksmInfo = serviceMap.get(HddsProtos.NodeType.KSM);
-
-    Assert.assertEquals(ksmAddress.getHostName(), ksmInfo.getHostname());
-    Assert.assertEquals(ksmAddress.getPort(),
-        ksmInfo.getPort(ServicePort.Type.RPC));
-    Assert.assertEquals(server.getHttpAddress().getPort(),
-        ksmInfo.getPort(ServicePort.Type.HTTP));
-
-    InetSocketAddress scmAddress =
-        getScmAddressForClients(conf);
-    ServiceInfo scmInfo = serviceMap.get(HddsProtos.NodeType.SCM);
-
-    Assert.assertEquals(scmAddress.getHostName(), scmInfo.getHostname());
-    Assert.assertEquals(scmAddress.getPort(),
-        scmInfo.getPort(ServicePort.Type.RPC));
-
-    ServiceInfo datanodeInfo = serviceMap.get(HddsProtos.NodeType.DATANODE);
-    DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails();
-    Assert.assertEquals(datanodeDetails.getHostName(),
-        datanodeInfo.getHostname());
-
-    Map<ServicePort.Type, Integer> ports = datanodeInfo.getPorts();
-    for(ServicePort.Type type : ports.keySet()) {
-      switch (type) {
-      case HTTP:
-      case HTTPS:
-        Assert.assertEquals(
-            datanodeDetails.getPort(DatanodeDetails.Port.Name.REST).getValue(),
-            ports.get(type));
-        break;
-      default:
-        // KSM only reports the datanode's info port details,
-        // i.e. HTTP or HTTPS.
-        // Other port types are not expected as of now.
-        Assert.fail();
-        break;
-      }
-    }
-  }
-
-}




[07/50] [abbrv] hadoop git commit: Revert "Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk"

Posted by bh...@apache.org.
Revert "Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk"

This reverts commit c163d1797ade0f47d35b4a44381b8ef1dfec5b60, reversing
changes made to 0d9804dcef2eab5ebf84667d9ca49bb035d9a731.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39ad9890
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39ad9890
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39ad9890

Branch: refs/heads/HDDS-48
Commit: 39ad98903a5f042573b97a2e5438bc57af7cc7a1
Parents: c163d17
Author: Anu Engineer <ae...@apache.org>
Authored: Thu Jul 5 12:22:18 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu Jul 5 12:22:18 2018 -0700

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            |  17 +-
 .../hadoop/yarn/api/records/Resource.java       |  13 -
 .../api/records/impl/LightWeightResource.java   |  23 +-
 .../scheduler/fair/ConfigurableResource.java    |  69 +----
 .../fair/FairSchedulerConfiguration.java        | 174 ++----------
 .../allocation/AllocationFileQueueParser.java   |   2 +-
 .../resourcemanager/webapp/dao/AppInfo.java     |   2 +-
 .../webapp/dao/SchedulerInfo.java               |   8 +-
 .../fair/TestFairSchedulerConfiguration.java    | 160 +++--------
 .../webapp/TestRMWebServices.java               |  31 +--
 .../webapp/TestRMWebServicesApps.java           |  14 +-
 ...estRMWebServicesAppsCustomResourceTypes.java | 242 -----------------
 .../webapp/TestRMWebServicesCapacitySched.java  |  30 +-
 .../TestRMWebServicesConfigurationMutation.java |   5 -
 .../webapp/TestRMWebServicesFairScheduler.java  |  95 ++++---
 .../TestRMWebServicesSchedulerActivities.java   |   2 +-
 ...ustomResourceTypesConfigurationProvider.java | 138 ----------
 .../FairSchedulerJsonVerifications.java         | 139 ----------
 .../FairSchedulerXmlVerifications.java          | 153 -----------
 ...ervicesFairSchedulerCustomResourceTypes.java | 271 -------------------
 .../webapp/helper/AppInfoJsonVerifications.java | 123 ---------
 .../webapp/helper/AppInfoXmlVerifications.java  | 132 ---------
 .../webapp/helper/BufferedClientResponse.java   |  57 ----
 .../helper/JsonCustomResourceTypeTestcase.java  |  77 ------
 .../ResourceRequestsJsonVerifications.java      | 252 -----------------
 .../ResourceRequestsXmlVerifications.java       | 215 ---------------
 .../helper/XmlCustomResourceTypeTestCase.java   | 112 --------
 .../src/site/markdown/FairScheduler.md          |   6 +-
 28 files changed, 157 insertions(+), 2405 deletions(-)
----------------------------------------------------------------------
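
For reference, the change being reverted let the fair scheduler's minResources and maxResources name resource types directly ("vcores=10, memory-mb=1024", or percentages such as "vcores=60%, memory-mb=40%"), as described by the javadoc removed in the FairSchedulerConfiguration.java hunk below. A minimal allocation-file sketch of that syntax, with a purely illustrative queue name and values, might look like:

  <queue name="exampleQueue">
    <minResources>vcores=2, memory-mb=2048</minResources>
    <maxResources>vcores=60%, memory-mb=40%</maxResources>
  </queue>

After this revert, only the older forms remain, e.g. "1024 mb, 3 vcores" or a single percentage such as "50%", per the javadoc restored below.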


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 5cc81e5..5841361 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -67,6 +67,11 @@
   </Match>
   <Match>
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
+    <Method name="getLocalityStatistics" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
     <Method name="incNumAllocatedContainers"/>
     <Bug pattern="VO_VOLATILE_INCREMENT" />
   </Match>
@@ -113,18 +118,6 @@
     <Bug pattern="BC_UNCONFIRMED_CAST" />
   </Match>
 
-  <!-- Ignore exposed internal representations -->
-  <Match>
-    <Class name="org.apache.hadoop.yarn.api.records.Resource" />
-    <Method name="getResources" />
-    <Bug pattern="EI_EXPOSE_REP" />
-  </Match>
-  <Match>
-    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
-    <Method name="getLocalityStatistics" />
-    <Bug pattern="EI_EXPOSE_REP" />
-  </Match>
-
   <!-- Object cast is based on the event type -->
   <Match>
     <Class name="org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher" />

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 173d4c9..71a6b54 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.impl.LightWeightResource;
@@ -76,18 +75,6 @@ public abstract class Resource implements Comparable<Resource> {
   @Private
   public static final int VCORES_INDEX = 1;
 
-  /**
-   * Return a new {@link Resource} instance with all resource values
-   * initialized to {@code value}.
-   * @param value the value to use for all resources
-   * @return a new {@link Resource} instance
-   */
-  @Private
-  @Unstable
-  public static Resource newInstance(long value) {
-    return new LightWeightResource(value);
-  }
-
   @Public
   @Stable
   public static Resource newInstance(int memory, int vCores) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
index 77f77f3..a6e6432 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
@@ -18,8 +18,9 @@
 
 package org.apache.hadoop.yarn.api.records.impl;
 
-import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -57,29 +58,13 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.*;
  *
  * @see Resource
  */
-@Private
+@InterfaceAudience.Private
 @Unstable
 public class LightWeightResource extends Resource {
 
   private ResourceInformation memoryResInfo;
   private ResourceInformation vcoresResInfo;
 
-  /**
-   * Create a new {@link LightWeightResource} instance with all resource values
-   * initialized to {@code value}.
-   * @param value the value to use for all resources
-   */
-  public LightWeightResource(long value) {
-    ResourceInformation[] types = ResourceUtils.getResourceTypesArray();
-    initResourceInformations(value, value, types.length);
-
-    for (int i = 2; i < types.length; i++) {
-      resources[i] = new ResourceInformation();
-      ResourceInformation.copy(types[i], resources[i]);
-      resources[i].setValue(value);
-    }
-  }
-
   public LightWeightResource(long memory, int vcores) {
     int numberOfKnownResourceTypes = ResourceUtils
         .getNumberOfKnownResourceTypes();
@@ -106,7 +91,7 @@ public class LightWeightResource extends Resource {
     }
   }
 
-  private void initResourceInformations(long memory, long vcores,
+  private void initResourceInformations(long memory, int vcores,
       int numberOfKnownResourceTypes) {
     this.memoryResInfo = newDefaultInformation(MEMORY_URI, MEMORY_MB.getUnits(),
         memory);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
index 0c3b0dd..ecdd011 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
@@ -18,13 +18,9 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
-import java.util.Arrays;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
-import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
-import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 
 /**
  * A {@code ConfigurableResource} object represents an entity that is used to
@@ -37,53 +33,29 @@ public class ConfigurableResource {
   private final Resource resource;
   private final double[] percentages;
 
-  ConfigurableResource() {
-    this(getOneHundredPercentArray());
-  }
-
-  ConfigurableResource(double[] percentages) {
+  public ConfigurableResource(double[] percentages) {
     this.percentages = percentages.clone();
     this.resource = null;
   }
 
-  ConfigurableResource(long value) {
-    this(Resource.newInstance(value));
-  }
-
   public ConfigurableResource(Resource resource) {
     this.percentages = null;
     this.resource = resource;
   }
 
-  private static double[] getOneHundredPercentArray() {
-    double[] resourcePercentages =
-        new double[ResourceUtils.getNumberOfKnownResourceTypes()];
-    Arrays.fill(resourcePercentages, 1.0);
-
-    return resourcePercentages;
-  }
-
   /**
    * Get resource by multiplying the cluster resource and the percentage of
    * each resource respectively. Return the absolute resource if either
    * {@code percentages} or {@code clusterResource} is null.
    *
    * @param clusterResource the cluster resource
-   * @return resource the resulting resource
+   * @return resource
    */
   public Resource getResource(Resource clusterResource) {
     if (percentages != null && clusterResource != null) {
       long memory = (long) (clusterResource.getMemorySize() * percentages[0]);
       int vcore = (int) (clusterResource.getVirtualCores() * percentages[1]);
-      Resource res = Resource.newInstance(memory, vcore);
-      ResourceInformation[] clusterInfo = clusterResource.getResources();
-
-      for (int i = 2; i < clusterInfo.length; i++) {
-        res.setResourceValue(i,
-            (long)(clusterInfo[i].getValue() * percentages[i]));
-      }
-
-      return res;
+      return Resource.newInstance(memory, vcore);
     } else {
       return resource;
     }
@@ -97,39 +69,4 @@ public class ConfigurableResource {
   public Resource getResource() {
     return resource;
   }
-
-  /**
-   * Set the value of the wrapped resource if this object isn't setup to use
-   * percentages. If this object is set to use percentages, this method has
-   * no effect.
-   *
-   * @param name the name of the resource
-   * @param value the value
-   */
-  void setValue(String name, long value) {
-    if (resource != null) {
-      resource.setResourceValue(name, value);
-    }
-  }
-
-  /**
-   * Set the percentage of the resource if this object is setup to use
-   * percentages. If this object is set to use percentages, this method has
-   * no effect.
-   *
-   * @param name the name of the resource
-   * @param value the percentage
-   */
-  void setPercentage(String name, double value) {
-    if (percentages != null) {
-      Integer index = ResourceUtils.getResourceTypeIndex().get(name);
-
-      if (index != null) {
-        percentages[index] = value;
-      } else {
-        throw new ResourceNotFoundException("The requested resource, \""
-            + name + "\", could not be found.");
-      }
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index 8c4932b..b50e4bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -214,9 +213,6 @@ public class FairSchedulerConfiguration extends Configuration {
           CONF_PREFIX + "reservable-nodes";
   public static final float RESERVABLE_NODES_DEFAULT = 0.05f;
 
-  private static final String INVALID_RESOURCE_DEFINITION_PREFIX =
-          "Error reading resource config--invalid resource definition: ";
-
   public FairSchedulerConfiguration() {
     super();
   }
@@ -411,167 +407,54 @@ public class FairSchedulerConfiguration extends Configuration {
   }
 
   /**
-   * Parses a resource config value in one of three forms:
-   * <ol>
-   * <li>Percentage: &quot;50%&quot; or &quot;40% memory, 60% cpu&quot;</li>
-   * <li>New style resources: &quot;vcores=10, memory-mb=1024&quot;
-   * or &quot;vcores=60%, memory-mb=40%&quot;</li>
-   * <li>Old style resources: &quot;1024 mb, 10 vcores&quot;</li>
-   * </ol>
-   * In new style resources, any resource that is not specified will be
-   * set to {@link Long#MAX_VALUE} or 100%, as appropriate. Also, in the new
-   * style resources, units are not allowed. Units are assumed from the resource
-   * manager's settings for the resources when the value isn't a percentage.
-   *
-   * @param value the resource definition to parse
-   * @return a {@link ConfigurableResource} that represents the parsed value
-   * @throws AllocationConfigurationException if the raw value is not a valid
-   * resource definition
+   * Parses a resource config value of a form like "1024", "1024 mb",
+   * or "1024 mb, 3 vcores". If no units are given, megabytes are assumed.
+   * 
+   * @throws AllocationConfigurationException
    */
-  public static ConfigurableResource parseResourceConfigValue(String value)
+  public static ConfigurableResource parseResourceConfigValue(String val)
       throws AllocationConfigurationException {
-    return parseResourceConfigValue(value, Long.MAX_VALUE);
-  }
-
-  /**
-   * Parses a resource config value in one of three forms:
-   * <ol>
-   * <li>Percentage: &quot;50%&quot; or &quot;40% memory, 60% cpu&quot;</li>
-   * <li>New style resources: &quot;vcores=10, memory-mb=1024&quot;
-   * or &quot;vcores=60%, memory-mb=40%&quot;</li>
-   * <li>Old style resources: &quot;1024 mb, 10 vcores&quot;</li>
-   * </ol>
-   * In new style resources, any resource that is not specified will be
-   * set to {@code missing} or 0%, as appropriate. Also, in the new style
-   * resources, units are not allowed. Units are assumed from the resource
-   * manager's settings for the resources when the value isn't a percentage.
-   *
-   * The {@code missing} parameter is only used in the case of new style
-   * resources without percentages. With new style resources with percentages,
-   * any missing resources will be assumed to be 100% because percentages are
-   * only used with maximum resource limits.
-   *
-   * @param value the resource definition to parse
-   * @param missing the value to use for any unspecified resources
-   * @return a {@link ConfigurableResource} that represents the parsed value
-   * @throws AllocationConfigurationException if the raw value is not a valid
-   * resource definition
-   */
-  public static ConfigurableResource parseResourceConfigValue(String value,
-      long missing) throws AllocationConfigurationException {
     ConfigurableResource configurableResource;
-
-    if (value.trim().isEmpty()) {
-      throw new AllocationConfigurationException("Error reading resource "
-          + "config--the resource string is empty.");
-    }
-
     try {
-      if (value.contains("=")) {
-        configurableResource = parseNewStyleResource(value, missing);
-      } else if (value.contains("%")) {
-        configurableResource = parseOldStyleResourceAsPercentage(value);
+      val = StringUtils.toLowerCase(val);
+      if (val.contains("%")) {
+        configurableResource = new ConfigurableResource(
+            getResourcePercentage(val));
       } else {
-        configurableResource = parseOldStyleResource(value);
+        int memory = findResource(val, "mb");
+        int vcores = findResource(val, "vcores");
+        configurableResource = new ConfigurableResource(
+            BuilderUtils.newResource(memory, vcores));
       }
-    } catch (RuntimeException ex) {
+    } catch (AllocationConfigurationException ex) {
+      throw ex;
+    } catch (Exception ex) {
       throw new AllocationConfigurationException(
           "Error reading resource config", ex);
     }
-
-    return configurableResource;
-  }
-
-  private static ConfigurableResource parseNewStyleResource(String value,
-          long missing) throws AllocationConfigurationException {
-
-    final ConfigurableResource configurableResource;
-    boolean asPercent = value.contains("%");
-    if (asPercent) {
-      configurableResource = new ConfigurableResource();
-    } else {
-      configurableResource = new ConfigurableResource(missing);
-    }
-
-    String[] resources = value.split(",");
-    for (String resource : resources) {
-      String[] parts = resource.split("=");
-
-      if (parts.length != 2) {
-        throw createConfigException(value,
-                        "Every resource must be of the form: name=value.");
-      }
-
-      String resourceName = parts[0].trim();
-      String resourceValue = parts[1].trim();
-      try {
-        if (asPercent) {
-          configurableResource.setPercentage(resourceName,
-              findPercentage(resourceValue, ""));
-        } else {
-          configurableResource.setValue(resourceName,
-              Long.parseLong(resourceValue));
-        }
-      } catch (ResourceNotFoundException ex) {
-        throw createConfigException(value, "The "
-            + "resource name, \"" + resourceName + "\" was not "
-            + "recognized. Please check the value of "
-            + YarnConfiguration.RESOURCE_TYPES + " in the Resource "
-            + "Manager's configuration files.", ex);
-      } catch (NumberFormatException ex) {
-        // This only comes from Long.parseLong()
-        throw createConfigException(value, "The "
-            + "resource values must all be integers. \"" + resourceValue
-            + "\" is not an integer.", ex);
-      } catch (AllocationConfigurationException ex) {
-        // This only comes from findPercentage()
-        throw createConfigException(value, "The "
-            + "resource values must all be percentages. \""
-            + resourceValue + "\" is either not a number or does not "
-            + "include the '%' symbol.", ex);
-      }
-    }
     return configurableResource;
   }
 
-  private static ConfigurableResource parseOldStyleResourceAsPercentage(
-          String value) throws AllocationConfigurationException {
-    return new ConfigurableResource(
-            getResourcePercentage(StringUtils.toLowerCase(value)));
-  }
-
-  private static ConfigurableResource parseOldStyleResource(String value)
-          throws AllocationConfigurationException {
-    final String lCaseValue = StringUtils.toLowerCase(value);
-    int memory = findResource(lCaseValue, "mb");
-    int vcores = findResource(lCaseValue, "vcores");
-
-    return new ConfigurableResource(
-            BuilderUtils.newResource(memory, vcores));
-  }
-
   private static double[] getResourcePercentage(
       String val) throws AllocationConfigurationException {
     int numberOfKnownResourceTypes = ResourceUtils
         .getNumberOfKnownResourceTypes();
     double[] resourcePercentage = new double[numberOfKnownResourceTypes];
     String[] strings = val.split(",");
-
     if (strings.length == 1) {
       double percentage = findPercentage(strings[0], "");
       for (int i = 0; i < numberOfKnownResourceTypes; i++) {
-        resourcePercentage[i] = percentage;
+        resourcePercentage[i] = percentage/100;
       }
     } else {
-      resourcePercentage[0] = findPercentage(val, "memory");
-      resourcePercentage[1] = findPercentage(val, "cpu");
+      resourcePercentage[0] = findPercentage(val, "memory")/100;
+      resourcePercentage[1] = findPercentage(val, "cpu")/100;
     }
-
     return resourcePercentage;
   }
 
   private static double findPercentage(String val, String units)
-      throws AllocationConfigurationException {
+    throws AllocationConfigurationException {
     final Pattern pattern =
         Pattern.compile("((\\d+)(\\.\\d*)?)\\s*%\\s*" + units);
     Matcher matcher = pattern.matcher(val);
@@ -584,22 +467,7 @@ public class FairSchedulerConfiguration extends Configuration {
             units);
       }
     }
-    return Double.parseDouble(matcher.group(1)) / 100.0;
-  }
-
-  private static AllocationConfigurationException createConfigException(
-          String value, String message) {
-    return createConfigException(value, message, null);
-  }
-
-  private static AllocationConfigurationException createConfigException(
-      String value, String message, Throwable t) {
-    String msg = INVALID_RESOURCE_DEFINITION_PREFIX + value + ". " + message;
-    if (t != null) {
-      return new AllocationConfigurationException(msg, t);
-    } else {
-      return new AllocationConfigurationException(msg);
-    }
+    return Double.parseDouble(matcher.group(1));
   }
 
   public long getUpdateInterval() {
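
With this change parseResourceConfigValue() is back to accepting only the two original forms: an absolute "memory, vcores" pair, or a percentage of the cluster resource that is resolved when the ConfigurableResource is read. A minimal sketch of both forms (illustrative values only; enclosing class, imports of ConfigurableResource/FairSchedulerConfiguration, and exception handling omitted; not part of the patch):

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.server.utils.BuilderUtils;

    // Absolute form: memory and vcores must both be present.
    ConfigurableResource fixed =
        FairSchedulerConfiguration.parseResourceConfigValue("5120 mb, 2 vcores");
    Resource r1 = fixed.getResource();                       // 5120 MB, 2 vcores

    // Percentage form: resolved against the cluster resource at read time.
    ConfigurableResource relative =
        FairSchedulerConfiguration.parseResourceConfigValue("50% memory, 50% cpu");
    Resource r2 = relative.getResource(BuilderUtils.newResource(10 * 1024, 4));
                                                             // 5120 MB, 2 vcores

    // A bare number such as "1024" throws AllocationConfigurationException.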

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
index 441c34a..d5a436e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
@@ -134,7 +134,7 @@ public class AllocationFileQueueParser {
       if (MIN_RESOURCES.equals(field.getTagName())) {
         String text = getTrimmedTextData(field);
         ConfigurableResource val =
-            FairSchedulerConfiguration.parseResourceConfigValue(text, 0L);
+            FairSchedulerConfiguration.parseResourceConfigValue(text);
         builder.minQueueResources(queueName, val.getResource());
       } else if (MAX_RESOURCES.equals(field.getTagName())) {
         String text = getTrimmedTextData(field);
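
With the new-style "name=value" syntax gone, the two-argument overload (and its zero default for unspecified resources) is no longer needed, so minResources is now parsed exactly like maxResources. A hedged sketch of the resulting flow, using an assumed tag value that is not taken from the patch:

    // Assumed example text of a <minResources> element; exception handling omitted.
    String text = "1024 mb, 1 vcores";
    ConfigurableResource val =
        FairSchedulerConfiguration.parseResourceConfigValue(text);
    builder.minQueueResources(queueName, val.getResource());  // 1024 MB, 1 vcore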

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index 9d82bc7..d47f13d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -479,7 +479,7 @@ public class AppInfo {
   public int getNumNonAMContainersPreempted() {
     return numNonAMContainerPreempted;
   }
-
+  
   public int getNumAMContainersPreempted() {
     return numAMContainerPreempted;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
index 163f707..81491b1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
@@ -41,9 +41,8 @@ public class SchedulerInfo {
   protected EnumSet<SchedulerResourceTypes> schedulingResourceTypes;
   protected int maximumClusterPriority;
 
-  // JAXB needs this
   public SchedulerInfo() {
-  }
+  } // JAXB needs this
 
   public SchedulerInfo(final ResourceManager rm) {
     ResourceScheduler rs = rm.getResourceScheduler();
@@ -75,10 +74,7 @@ public class SchedulerInfo {
   }
 
   public String getSchedulerResourceTypes() {
-    if (minAllocResource != null) {
-      return Arrays.toString(minAllocResource.getResource().getResources());
-    }
-    return null;
+    return Arrays.toString(minAllocResource.getResource().getResources());
   }
 
   public int getMaxClusterLevelAppPriority() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 70f83ab..481645b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -48,9 +48,6 @@ import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Assert;
 import org.junit.Test;
 
-/**
- * Tests fair scheduler configuration.
- */
 public class TestFairSchedulerConfiguration {
 
   private static final String A_CUSTOM_RESOURCE = "a-custom-resource";
@@ -105,152 +102,67 @@ public class TestFairSchedulerConfiguration {
 
   @Test
   public void testParseResourceConfigValue() throws Exception {
-    Resource expected = BuilderUtils.newResource(5 * 1024, 2);
-    Resource clusterResource = BuilderUtils.newResource(10 * 1024, 4);
-
-    assertEquals(expected,
-        parseResourceConfigValue("2 vcores, 5120 mb").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("5120 mb, 2 vcores").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("2vcores,5120mb").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("5120mb,2vcores").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("5120mb   mb, 2    vcores").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("5120 Mb, 2 vCores").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("  5120 mb, 2 vcores  ").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("  5120.3 mb, 2.35 vcores  ").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("  5120. mb, 2. vcores  ").getResource());
-
-    assertEquals(expected,
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("2 vcores, 1024 mb").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("1024 mb, 2 vcores").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("2vcores,1024mb").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("1024mb,2vcores").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("1024   mb, 2    vcores").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("1024 Mb, 2 vCores").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("  1024 mb, 2 vcores  ").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("  1024.3 mb, 2.35 vcores  ").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("  1024. mb, 2. vcores  ").getResource());
+
+    Resource clusterResource = BuilderUtils.newResource(2048, 4);
+    assertEquals(BuilderUtils.newResource(1024, 2),
         parseResourceConfigValue("50% memory, 50% cpu").
             getResource(clusterResource));
-    assertEquals(expected,
+    assertEquals(BuilderUtils.newResource(1024, 2),
         parseResourceConfigValue("50% Memory, 50% CpU").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(5 * 1024, 4),
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("50%").getResource(clusterResource));
+    assertEquals(BuilderUtils.newResource(1024, 4),
         parseResourceConfigValue("50% memory, 100% cpu").
         getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(5 * 1024, 4),
+    assertEquals(BuilderUtils.newResource(1024, 4),
         parseResourceConfigValue(" 100% cpu, 50% memory").
         getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(5 * 1024, 0),
+    assertEquals(BuilderUtils.newResource(1024, 0),
         parseResourceConfigValue("50% memory, 0% cpu").
             getResource(clusterResource));
-    assertEquals(expected,
+    assertEquals(BuilderUtils.newResource(1024, 2),
         parseResourceConfigValue("50 % memory, 50 % cpu").
             getResource(clusterResource));
-    assertEquals(expected,
+    assertEquals(BuilderUtils.newResource(1024, 2),
         parseResourceConfigValue("50%memory,50%cpu").
             getResource(clusterResource));
-    assertEquals(expected,
+    assertEquals(BuilderUtils.newResource(1024, 2),
         parseResourceConfigValue("  50  %  memory,  50  %  cpu  ").
             getResource(clusterResource));
-    assertEquals(expected,
+    assertEquals(BuilderUtils.newResource(1024, 2),
         parseResourceConfigValue("50.% memory, 50.% cpu").
             getResource(clusterResource));
+
+    clusterResource =  BuilderUtils.newResource(1024 * 10, 4);
     assertEquals(BuilderUtils.newResource((int)(1024 * 10 * 0.109), 2),
         parseResourceConfigValue("10.9% memory, 50.6% cpu").
             getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue("50%").getResource(clusterResource));
-
-    Configuration conf = new Configuration();
-
-    conf.set(YarnConfiguration.RESOURCE_TYPES, "test1");
-    ResourceUtils.resetResourceTypes(conf);
-
-    clusterResource = BuilderUtils.newResource(10 * 1024, 4);
-    expected = BuilderUtils.newResource(5 * 1024, 2);
-    expected.setResourceValue("test1", Long.MAX_VALUE);
-
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=2, memory-mb=5120").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("memory-mb=5120, vcores=2").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=2,memory-mb=5120").getResource());
-    assertEquals(expected, parseResourceConfigValue(" vcores = 2 , "
-            + "memory-mb = 5120 ").getResource());
-
-    expected.setResourceValue("test1", 0L);
-
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=2, memory-mb=5120", 0L).getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("memory-mb=5120, vcores=2", 0L).getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=2,memory-mb=5120", 0L).getResource());
-    assertEquals(expected,
-        parseResourceConfigValue(" vcores = 2 , memory-mb = 5120 ",
-            0L).getResource());
-
-    clusterResource.setResourceValue("test1", 8L);
-    expected.setResourceValue("test1", 4L);
-
-    assertEquals(expected,
-        parseResourceConfigValue("50%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=2, memory-mb=5120, "
-            + "test1=4").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("test1=4, vcores=2, "
-            + "memory-mb=5120").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("memory-mb=5120, test1=4, "
-            + "vcores=2").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=2,memory-mb=5120,"
-            + "test1=4").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue(" vcores = 2 , memory-mb = 5120 , "
-            + "test1 = 4 ").getResource());
-
-    expected = BuilderUtils.newResource(4 * 1024, 3);
-    expected.setResourceValue("test1", 8L);
-
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=75%, "
-            + "memory-mb=40%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue("memory-mb=40%, "
-            + "vcores=75%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=75%,"
-            + "memory-mb=40%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue(" vcores = 75 % , "
-            + "memory-mb = 40 % ").getResource(clusterResource));
-
-    expected.setResourceValue("test1", 4L);
-
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=75%, memory-mb=40%, "
-            + "test1=50%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue("test1=50%, vcores=75%, "
-            + "memory-mb=40%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue("memory-mb=40%, test1=50%, "
-            + "vcores=75%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=75%,memory-mb=40%,"
-            + "test1=50%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue(" vcores = 75 % , memory-mb = 40 % , "
-            + "test1 = 50 % ").getResource(clusterResource));
   }
-
+  
   @Test(expected = AllocationConfigurationException.class)
   public void testNoUnits() throws Exception {
     parseResourceConfigValue("1024");
   }
-
+  
   @Test(expected = AllocationConfigurationException.class)
   public void testOnlyMemory() throws Exception {
     parseResourceConfigValue("1024mb");
@@ -260,7 +172,7 @@ public class TestFairSchedulerConfiguration {
   public void testOnlyCPU() throws Exception {
     parseResourceConfigValue("1024vcores");
   }
-
+  
   @Test(expected = AllocationConfigurationException.class)
   public void testGibberish() throws Exception {
     parseResourceConfigValue("1o24vc0res");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 3902889..0702d65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -53,7 +53,11 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.*;
+import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
+import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -72,12 +76,11 @@ import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
+import org.eclipse.jetty.server.Response;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import org.w3c.dom.NodeList;
@@ -93,8 +96,6 @@ import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
 public class TestRMWebServices extends JerseyTestBase {
-  private static final Logger LOG =
-          LoggerFactory.getLogger(TestRMWebServices.class);
 
   private static MockRM rm;
 
@@ -471,19 +472,19 @@ public class TestRMWebServices extends JerseyTestBase {
     QueueMetrics metrics = rs.getRootQueueMetrics();
     ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
 
-    long totalMBExpect =
+    long totalMBExpect = 
         metrics.getAvailableMB() + metrics.getAllocatedMB();
-    long totalVirtualCoresExpect =
+    long totalVirtualCoresExpect = 
         metrics.getAvailableVirtualCores() + metrics.getAllocatedVirtualCores();
-    assertEquals("appsSubmitted doesn't match",
+    assertEquals("appsSubmitted doesn't match", 
         metrics.getAppsSubmitted(), submittedApps);
-    assertEquals("appsCompleted doesn't match",
+    assertEquals("appsCompleted doesn't match", 
         metrics.getAppsCompleted(), completedApps);
     assertEquals("reservedMB doesn't match",
         metrics.getReservedMB(), reservedMB);
-    assertEquals("availableMB doesn't match",
+    assertEquals("availableMB doesn't match", 
         metrics.getAvailableMB(), availableMB);
-    assertEquals("allocatedMB doesn't match",
+    assertEquals("allocatedMB doesn't match", 
         metrics.getAllocatedMB(), allocMB);
     assertEquals("reservedVirtualCores doesn't match",
         metrics.getReservedVirtualCores(), reservedVirtualCores);
@@ -596,13 +597,11 @@ public class TestRMWebServices extends JerseyTestBase {
 
   public void verifyClusterSchedulerFifo(JSONObject json) throws JSONException,
       Exception {
-    assertEquals("incorrect number of elements in: " + json, 1, json.length());
+    assertEquals("incorrect number of elements", 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
-    assertEquals("incorrect number of elements in: " + info, 1, info.length());
+    assertEquals("incorrect number of elements", 1, info.length());
     info = info.getJSONObject("schedulerInfo");
-
-    LOG.debug("schedulerInfo: {}", info);
-    assertEquals("incorrect number of elements in: " + info, 11, info.length());
+    assertEquals("incorrect number of elements", 11, info.length());
 
     verifyClusterSchedulerFifoGeneric(info.getString("type"),
         info.getString("qstate"), (float) info.getDouble("capacity"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index 15f94e1..6c6f400 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -79,7 +79,7 @@ import com.sun.jersey.test.framework.WebAppDescriptor;
 public class TestRMWebServicesApps extends JerseyTestBase {
 
   private static MockRM rm;
-
+  
   private static final int CONTAINER_MB = 1024;
 
   private static class WebServletModule extends ServletModule {
@@ -324,7 +324,7 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     assertEquals("incorrect number of elements", 1, apps.length());
     array = apps.getJSONArray("app");
     assertEquals("incorrect number of elements", 2, array.length());
-    assertTrue("both app states of ACCEPTED and KILLED are not present",
+    assertTrue("both app states of ACCEPTED and KILLED are not present", 
         (array.getJSONObject(0).getString("state").equals("ACCEPTED") &&
         array.getJSONObject(1).getString("state").equals("KILLED")) ||
         (array.getJSONObject(0).getString("state").equals("KILLED") &&
@@ -375,12 +375,12 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     assertEquals("incorrect number of elements", 1, apps.length());
     array = apps.getJSONArray("app");
     assertEquals("incorrect number of elements", 2, array.length());
-    assertTrue("both app states of ACCEPTED and KILLED are not present",
+    assertTrue("both app states of ACCEPTED and KILLED are not present", 
         (array.getJSONObject(0).getString("state").equals("ACCEPTED") &&
         array.getJSONObject(1).getString("state").equals("KILLED")) ||
         (array.getJSONObject(0).getString("state").equals("KILLED") &&
         array.getJSONObject(1).getString("state").equals("ACCEPTED")));
-
+    
     rm.stop();
   }
 
@@ -511,8 +511,7 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     WebResource r = resource();
 
     ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("apps").queryParam("finalStatus",
-                    FinalApplicationStatus.UNDEFINED.toString())
+        .path("apps").queryParam("finalStatus", FinalApplicationStatus.UNDEFINED.toString())
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
@@ -1805,8 +1804,7 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     int numAttempt = 1;
     while (true) {
       // fail the AM by sending CONTAINER_FINISHED event without registering.
-      amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1,
-              ContainerState.COMPLETE);
+      amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
       rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.FAILED);
       if (numAttempt == maxAppAttempts) {
         rm.waitForState(app1.getApplicationId(), RMAppState.FAILED);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
deleted file mode 100644
index 83e0056..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp;
-
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.WebAppDescriptor;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
-import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
-import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler.CustomResourceTypesConfigurationProvider;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.AppInfoJsonVerifications;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.AppInfoXmlVerifications;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.BufferedClientResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.JsonCustomResourceTypeTestcase;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.ResourceRequestsJsonVerifications;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.ResourceRequestsXmlVerifications;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase;
-import org.apache.hadoop.yarn.util.resource.ResourceUtils;
-import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
-import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
-import org.apache.hadoop.yarn.webapp.JerseyTestBase;
-import org.codehaus.jettison.json.JSONArray;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-import org.junit.Before;
-import org.junit.Test;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-
-import javax.ws.rs.core.MediaType;
-import java.util.ArrayList;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * This test verifies that custom resource types are correctly serialized to XML
- * and JSON when HTTP GET request is sent to the resource: ws/v1/cluster/apps.
- */
-public class TestRMWebServicesAppsCustomResourceTypes extends JerseyTestBase {
-
-  private static MockRM rm;
-  private static final int CONTAINER_MB = 1024;
-
-  private static class WebServletModule extends ServletModule {
-    @Override
-    protected void configureServlets() {
-      bind(JAXBContextResolver.class);
-      bind(RMWebServices.class);
-      bind(GenericExceptionHandler.class);
-      Configuration conf = new Configuration();
-      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
-          YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-      conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
-          ResourceScheduler.class);
-      initResourceTypes(conf);
-      rm = new MockRM(conf);
-      bind(ResourceManager.class).toInstance(rm);
-      serve("/*").with(GuiceContainer.class);
-    }
-
-    private void initResourceTypes(Configuration conf) {
-      conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
-          CustomResourceTypesConfigurationProvider.class.getName());
-      ResourceUtils.resetResourceTypes(conf);
-    }
-  }
-
-  @Before
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    createInjectorForWebServletModule();
-  }
-
-  private void createInjectorForWebServletModule() {
-    GuiceServletConfig
-        .setInjector(Guice.createInjector(new WebServletModule()));
-  }
-
-  public TestRMWebServicesAppsCustomResourceTypes() {
-    super(new WebAppDescriptor.Builder(
-        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
-            .contextListenerClass(GuiceServletConfig.class)
-            .filterClass(com.google.inject.servlet.GuiceFilter.class)
-            .contextPath("jersey-guice-filter").servletPath("/").build());
-  }
-
-  @Test
-  public void testRunningAppXml() throws Exception {
-    rm.start();
-    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
-    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
-    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
-    am1.allocate("*", 2048, 1, new ArrayList<>());
-    amNodeManager.nodeHeartbeat(true);
-
-    WebResource r = resource();
-    WebResource path = r.path("ws").path("v1").path("cluster").path("apps");
-    ClientResponse response =
-        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
-
-    XmlCustomResourceTypeTestCase testCase =
-            new XmlCustomResourceTypeTestCase(path,
-                    new BufferedClientResponse(response));
-    testCase.verify(document -> {
-      NodeList apps = document.getElementsByTagName("apps");
-      assertEquals("incorrect number of apps elements", 1, apps.getLength());
-
-      NodeList appArray = ((Element)(apps.item(0)))
-              .getElementsByTagName("app");
-      assertEquals("incorrect number of app elements", 1, appArray.getLength());
-
-      verifyAppsXML(appArray, app1);
-    });
-
-    rm.stop();
-  }
-
-  @Test
-  public void testRunningAppJson() throws Exception {
-    rm.start();
-    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
-    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
-    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
-    am1.allocate("*", 2048, 1, new ArrayList<>());
-    amNodeManager.nodeHeartbeat(true);
-
-    WebResource r = resource();
-    WebResource path = r.path("ws").path("v1").path("cluster").path("apps");
-    ClientResponse response =
-        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
-
-    JsonCustomResourceTypeTestcase testCase =
-        new JsonCustomResourceTypeTestcase(path,
-            new BufferedClientResponse(response));
-    testCase.verify(json -> {
-      try {
-        assertEquals("incorrect number of apps elements", 1, json.length());
-        JSONObject apps = json.getJSONObject("apps");
-        assertEquals("incorrect number of app elements", 1, apps.length());
-        JSONArray array = apps.getJSONArray("app");
-        assertEquals("incorrect count of app", 1, array.length());
-
-        verifyAppInfoJson(array.getJSONObject(0), app1);
-      } catch (JSONException e) {
-        throw new RuntimeException(e);
-      }
-    });
-
-    rm.stop();
-  }
-
-  private void verifyAppsXML(NodeList appArray, RMApp app) {
-    for (int i = 0; i < appArray.getLength(); i++) {
-      Element element = (Element) appArray.item(i);
-      AppInfoXmlVerifications.verify(element, app);
-
-      NodeList resourceRequests =
-          element.getElementsByTagName("resourceRequests");
-      assertEquals(1, resourceRequests.getLength());
-      Node resourceRequest = resourceRequests.item(0);
-      ResourceRequest rr =
-          ((AbstractYarnScheduler) rm.getRMContext().getScheduler())
-              .getApplicationAttempt(
-                  app.getCurrentAppAttempt().getAppAttemptId())
-              .getAppSchedulingInfo().getAllResourceRequests().get(0);
-      ResourceRequestsXmlVerifications.verifyWithCustomResourceTypes(
-              (Element) resourceRequest, rr,
-          CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
-    }
-  }
-
-  private void verifyAppInfoJson(JSONObject info, RMApp app) throws
-          JSONException {
-    int expectedNumberOfElements = getExpectedNumberOfElements(app);
-
-    assertEquals("incorrect number of elements", expectedNumberOfElements,
-        info.length());
-
-    AppInfoJsonVerifications.verify(info, app);
-
-    JSONArray resourceRequests = info.getJSONArray("resourceRequests");
-    JSONObject requestInfo = resourceRequests.getJSONObject(0);
-    ResourceRequest rr =
-        ((AbstractYarnScheduler) rm.getRMContext().getScheduler())
-            .getApplicationAttempt(app.getCurrentAppAttempt().getAppAttemptId())
-            .getAppSchedulingInfo().getAllResourceRequests().get(0);
-
-    ResourceRequestsJsonVerifications.verifyWithCustomResourceTypes(
-            requestInfo, rr,
-            CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
-  }
-
-  private int getExpectedNumberOfElements(RMApp app) {
-    int expectedNumberOfElements = 40 + 2; // 2 -> resourceRequests
-    if (app.getApplicationSubmissionContext()
-        .getNodeLabelExpression() != null) {
-      expectedNumberOfElements++;
-    }
-
-    if (app.getAMResourceRequests().get(0).getNodeLabelExpression() != null) {
-      expectedNumberOfElements++;
-    }
-
-    if (AppInfo
-        .getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()) != null) {
-      expectedNumberOfElements++;
-    }
-    return expectedNumberOfElements;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
index 46d0a66..e37f76f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
@@ -146,7 +146,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     config.setUserLimitFactor(B2, 100.0f);
     config.setCapacity(B3, 0.5f);
     config.setUserLimitFactor(B3, 100.0f);
-
+    
     config.setQueues(A1, new String[] {"a1a", "a1b"});
     final String A1A = A1 + ".a1a";
     config.setCapacity(A1A, 85);
@@ -254,7 +254,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     }
   }
 
-  public void verifySubQueueXML(Element qElem, String q,
+  public void verifySubQueueXML(Element qElem, String q, 
       float parentAbsCapacity, float parentAbsMaxCapacity)
       throws Exception {
     NodeList children = qElem.getChildNodes();
@@ -317,34 +317,30 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
 
   private void verifyClusterScheduler(JSONObject json) throws JSONException,
       Exception {
-    assertEquals("incorrect number of elements in: " + json, 1, json.length());
+    assertEquals("incorrect number of elements", 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
-    assertEquals("incorrect number of elements in: " + info, 1, info.length());
+    assertEquals("incorrect number of elements", 1, info.length());
     info = info.getJSONObject("schedulerInfo");
-    assertEquals("incorrect number of elements in: " + info, 8, info.length());
+    assertEquals("incorrect number of elements", 8, info.length());
     verifyClusterSchedulerGeneric(info.getString("type"),
         (float) info.getDouble("usedCapacity"),
         (float) info.getDouble("capacity"),
         (float) info.getDouble("maxCapacity"), info.getString("queueName"));
     JSONObject health = info.getJSONObject("health");
     assertNotNull(health);
-    assertEquals("incorrect number of elements in: " + health, 3,
-        health.length());
+    assertEquals("incorrect number of elements", 3, health.length());
     JSONArray operationsInfo = health.getJSONArray("operationsInfo");
-    assertEquals("incorrect number of elements in: " + health, 4,
-        operationsInfo.length());
+    assertEquals("incorrect number of elements", 4, operationsInfo.length());
     JSONArray lastRunDetails = health.getJSONArray("lastRunDetails");
-    assertEquals("incorrect number of elements in: " + health, 3,
-        lastRunDetails.length());
+    assertEquals("incorrect number of elements", 3, lastRunDetails.length());
 
     JSONArray arr = info.getJSONObject("queues").getJSONArray("queue");
-    assertEquals("incorrect number of elements in: " + arr, 2, arr.length());
+    assertEquals("incorrect number of elements", 2, arr.length());
 
     // test subqueues
     for (int i = 0; i < arr.length(); i++) {
       JSONObject obj = arr.getJSONObject(i);
-      String q = CapacitySchedulerConfiguration.ROOT + "." +
-              obj.getString("queueName");
+      String q = CapacitySchedulerConfiguration.ROOT + "." + obj.getString("queueName");
       verifySubQueue(obj, q, 100, 100);
     }
   }
@@ -359,7 +355,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     assertTrue("queueName doesn't match", "root".matches(queueName));
   }
 
-  private void verifySubQueue(JSONObject info, String q,
+  private void verifySubQueue(JSONObject info, String q, 
       float parentAbsCapacity, float parentAbsMaxCapacity)
       throws JSONException, Exception {
     int numExpectedElements = 20;
@@ -468,7 +464,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
         csConf.getUserLimitFactor(q), info.userLimitFactor, 1e-3f);
   }
 
-  //Return a child Node of node with the tagname or null if none exists
+  //Return a child Node of node with the tagname or null if none exists 
   private Node getChildNodeByName(Node node, String tagname) {
     NodeList nodeList = node.getChildNodes();
     for (int i=0; i < nodeList.getLength(); ++i) {
@@ -518,7 +514,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
           for (int j=0; j<users.getLength(); ++j) {
             Node user = users.item(j);
             String username = getChildNodeByName(user, "username")
-                .getTextContent();
+              .getTextContent(); 
             assertTrue(username.equals("user1") || username.equals("user2"));
             //Should be a parsable integer
             Integer.parseInt(getChildNodeByName(getChildNodeByName(user,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
index 99b5648..3d28f12 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -42,8 +42,6 @@ import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response.Status;
@@ -61,8 +59,6 @@ import static org.junit.Assert.assertNull;
  * Test scheduler configuration mutation via REST API.
  */
 public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
-  private static final Logger LOG = LoggerFactory
-          .getLogger(TestRMWebServicesConfigurationMutation.class);
 
   private static final File CONF_FILE = new File(new File("target",
       "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE);
@@ -400,7 +396,6 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
             .entity(YarnWebServiceUtils.toJson(updateInfo,
                 SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
-    LOG.debug("Response headers: " + response.getHeaders());
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
     CapacitySchedulerConfiguration newCSConf = cs.getConfiguration();
     assertEquals(0.2f, newCSConf

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
index 58c72ee..e77785b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -16,14 +16,13 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import javax.ws.rs.core.MediaType;
 
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.WebAppDescriptor;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
@@ -31,9 +30,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager;
-
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
 import org.apache.hadoop.yarn.webapp.JerseyTestBase;
@@ -42,18 +38,18 @@ import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Before;
 import org.junit.Test;
-import javax.ws.rs.core.MediaType;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
 
-/**
- * Tests RM Webservices fair scheduler resources.
- */
 public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   private static MockRM rm;
   private static YarnConfiguration conf;
-
+  
   private static class WebServletModule extends ServletModule {
     @Override
     protected void configureServlets() {
@@ -62,7 +58,7 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
       bind(GenericExceptionHandler.class);
       conf = new YarnConfiguration();
       conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
-          ResourceScheduler.class);
+        ResourceScheduler.class);
       rm = new MockRM(conf);
       bind(ResourceManager.class).toInstance(rm);
       serve("/*").with(GuiceContainer.class);
@@ -70,32 +66,32 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   }
 
   static {
-    GuiceServletConfig
-        .setInjector(Guice.createInjector(new WebServletModule()));
+    GuiceServletConfig.setInjector(
+        Guice.createInjector(new WebServletModule()));
   }
 
   @Before
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    GuiceServletConfig
-        .setInjector(Guice.createInjector(new WebServletModule()));
+    GuiceServletConfig.setInjector(
+        Guice.createInjector(new WebServletModule()));
   }
 
   public TestRMWebServicesFairScheduler() {
     super(new WebAppDescriptor.Builder(
         "org.apache.hadoop.yarn.server.resourcemanager.webapp")
-            .contextListenerClass(GuiceServletConfig.class)
-            .filterClass(com.google.inject.servlet.GuiceFilter.class)
-            .contextPath("jersey-guice-filter").servletPath("/").build());
+        .contextListenerClass(GuiceServletConfig.class)
+        .filterClass(com.google.inject.servlet.GuiceFilter.class)
+        .contextPath("jersey-guice-filter").servletPath("/").build());
   }
-
+  
   @Test
-  public void testClusterScheduler() throws JSONException {
+  public void testClusterScheduler() throws JSONException, Exception {
     WebResource r = resource();
-    ClientResponse response =
-        r.path("ws").path("v1").path("cluster").path("scheduler")
-            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("scheduler").accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
@@ -103,51 +99,52 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   }
 
   @Test
-  public void testClusterSchedulerSlash() throws JSONException {
+  public void testClusterSchedulerSlash() throws JSONException, Exception {
     WebResource r = resource();
-    ClientResponse response =
-        r.path("ws").path("v1").path("cluster").path("scheduler/")
-            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("scheduler/").accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
     verifyClusterScheduler(json);
   }
-
+  
   @Test
-  public void testClusterSchedulerWithSubQueues()
-      throws JSONException {
-    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+  public void testClusterSchedulerWithSubQueues() throws JSONException,
+      Exception {
+    FairScheduler scheduler = (FairScheduler)rm.getResourceScheduler();
     QueueManager queueManager = scheduler.getQueueManager();
     // create LeafQueue
     queueManager.getLeafQueue("root.q.subqueue1", true);
     queueManager.getLeafQueue("root.q.subqueue2", true);
 
     WebResource r = resource();
-    ClientResponse response =
-        r.path("ws").path("v1").path("cluster").path("scheduler")
-            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("scheduler").accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
     JSONArray subQueueInfo = json.getJSONObject("scheduler")
         .getJSONObject("schedulerInfo").getJSONObject("rootQueue")
-        .getJSONObject("childQueues").getJSONArray("queue").getJSONObject(1)
-        .getJSONObject("childQueues").getJSONArray("queue");
+        .getJSONObject("childQueues").getJSONArray("queue")
+        .getJSONObject(1).getJSONObject("childQueues").getJSONArray("queue");
     // subQueueInfo is consist of subqueue1 and subqueue2 info
     assertEquals(2, subQueueInfo.length());
 
     // Verify 'childQueues' field is omitted from FairSchedulerLeafQueueInfo.
     try {
       subQueueInfo.getJSONObject(1).getJSONObject("childQueues");
-      fail("FairSchedulerQueueInfo should omit field 'childQueues'"
-          + "if child queue is empty.");
+      fail("FairSchedulerQueueInfo should omit field 'childQueues'" +
+           "if child queue is empty.");
     } catch (JSONException je) {
       assertEquals("JSONObject[\"childQueues\"] not found.", je.getMessage());
     }
   }
 
-  private void verifyClusterScheduler(JSONObject json) throws JSONException {
+  private void verifyClusterScheduler(JSONObject json) throws JSONException,
+      Exception {
     assertEquals("incorrect number of elements", 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
     assertEquals("incorrect number of elements", 1, info.length());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
index 40cf483..1e61186 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
@@ -457,7 +457,7 @@ public class TestRMWebServicesSchedulerActivities
       if (object.getClass() == JSONObject.class) {
         assertEquals("Number of allocations is wrong", 1, realValue);
       } else if (object.getClass() == JSONArray.class) {
-        assertEquals("Number of allocations is wrong in: " + object,
+        assertEquals("Number of allocations is wrong",
             ((JSONArray) object).length(), realValue);
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
deleted file mode 100644
index bb1fce0..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.LocalConfigurationProvider;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.List;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-import static java.util.stream.Collectors.toList;
-
-/**
- * This class can generate an XML configuration file of custom resource types.
- * See createInitialResourceTypes for the default values. All custom resource
- * type is prefixed with CUSTOM_RESOURCE_PREFIX. Please use the
- * getConfigurationInputStream method to get an InputStream of the XML. If you
- * want to have different number of resources in your tests, please see usages
- * of this class in this test class:
- * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}
- *
- */
-public class CustomResourceTypesConfigurationProvider
-    extends LocalConfigurationProvider {
-
-  private static class CustomResourceTypes {
-    private int count;
-    private String xml;
-
-    CustomResourceTypes(String xml, int count) {
-      this.xml = xml;
-      this.count = count;
-    }
-
-    public int getCount() {
-      return count;
-    }
-
-    public String getXml() {
-      return xml;
-    }
-  }
-
-  private static final String CUSTOM_RESOURCE_PREFIX = "customResource-";
-
-  private static CustomResourceTypes customResourceTypes =
-      createInitialResourceTypes();
-
-  private static CustomResourceTypes createInitialResourceTypes() {
-    return createCustomResourceTypes(2);
-  }
-
-  private static CustomResourceTypes createCustomResourceTypes(int count) {
-    List<String> resourceTypeNames = generateResourceTypeNames(count);
-
-    List<String> resourceUnitXmlElements = IntStream.range(0, count)
-            .boxed()
-            .map(i -> getResourceUnitsXml(resourceTypeNames.get(i)))
-            .collect(toList());
-
-    StringBuilder sb = new StringBuilder("<configuration>\n");
-    sb.append(getResourceTypesXml(resourceTypeNames));
-
-    for (String resourceUnitXml : resourceUnitXmlElements) {
-      sb.append(resourceUnitXml);
-
-    }
-    sb.append("</configuration>");
-
-    return new CustomResourceTypes(sb.toString(), count);
-  }
-
-  private static List<String> generateResourceTypeNames(int count) {
-    return IntStream.range(0, count)
-            .boxed()
-            .map(i -> CUSTOM_RESOURCE_PREFIX + i)
-            .collect(toList());
-  }
-
-  private static String getResourceUnitsXml(String resource) {
-    return "<property>\n" + "<name>yarn.resource-types." + resource
-        + ".units</name>\n" + "<value>k</value>\n" + "</property>\n";
-  }
-
-  private static String getResourceTypesXml(List<String> resources) {
-    final String resourceTypes = makeCommaSeparatedString(resources);
-
-    return "<property>\n" + "<name>yarn.resource-types</name>\n" + "<value>"
-        + resourceTypes + "</value>\n" + "</property>\n";
-  }
-
-  private static String makeCommaSeparatedString(List<String> resources) {
-    return resources.stream().collect(Collectors.joining(","));
-  }
-
-  @Override
-  public InputStream getConfigurationInputStream(Configuration bootstrapConf,
-      String name) throws YarnException, IOException {
-    if (YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE.equals(name)) {
-      return new ByteArrayInputStream(
-          customResourceTypes.getXml().getBytes());
-    } else {
-      return super.getConfigurationInputStream(bootstrapConf, name);
-    }
-  }
-
-  public static void reset() {
-    customResourceTypes = createInitialResourceTypes();
-  }
-
-  public static void setNumberOfResourceTypes(int count) {
-    customResourceTypes = createCustomResourceTypes(count);
-  }
-
-  public static List<String> getCustomResourceTypes() {
-    return generateResourceTypeNames(customResourceTypes.getCount());
-  }
-}
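
For context on how the provider deleted above was meant to be driven, the following hypothetical JUnit fragment uses only the static methods visible in the listing; the test class name and assertions are illustrative and not taken from the actual test classes.

    import java.io.InputStream;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.junit.Test;
    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertNotNull;

    public class CustomResourceTypesProviderExample {
      @Test
      public void generatesRequestedNumberOfResourceTypes() throws Exception {
        // Ask the provider for three custom resource types instead of the default two.
        CustomResourceTypesConfigurationProvider.setNumberOfResourceTypes(3);
        List<String> types =
            CustomResourceTypesConfigurationProvider.getCustomResourceTypes();
        assertEquals(3, types.size());   // names are customResource-0 .. customResource-2

        // The generated XML is served whenever the resource-types file is requested.
        InputStream xml = new CustomResourceTypesConfigurationProvider()
            .getConfigurationInputStream(new Configuration(),
                YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE);
        assertNotNull(xml);

        // Restore the default two custom resource types for later tests.
        CustomResourceTypesConfigurationProvider.reset();
      }
    }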





[45/50] [abbrv] hadoop git commit: Merge branch 'HDDS-48-merge' into trunk

Posted by bh...@apache.org.
Merge branch 'HDDS-48-merge' into trunk


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/021ab63c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/021ab63c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/021ab63c

Branch: refs/heads/HDDS-48
Commit: 021ab63c350b2e16fca0a40071a492838b0301f7
Parents: 4a08ddf f870f0d
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Mon Jul 9 12:54:14 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Mon Jul 9 12:54:14 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |    1 +
 .../scm/storage/ContainerProtocolCalls.java     |   24 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java    |   12 +
 .../org/apache/hadoop/ozone/OzoneConsts.java    |   18 +
 .../org/apache/hadoop/ozone/common/Storage.java |    3 +-
 .../hadoop/utils/MetadataStoreBuilder.java      |   40 +-
 .../main/proto/DatanodeContainerProtocol.proto  |   18 +-
 .../common/src/main/resources/ozone-default.xml |   33 +
 .../apache/hadoop/ozone/TestMetadataStore.java  |  484 --------
 .../apache/hadoop/utils/TestMetadataStore.java  |  533 +++++++++
 hadoop-hdds/container-service/pom.xml           |    6 +
 .../container/common/DataNodeLayoutVersion.java |   80 ++
 .../container/common/helpers/ChunkUtils.java    |  344 ------
 .../container/common/helpers/ContainerData.java |  512 --------
 .../common/helpers/ContainerMetrics.java        |    2 +-
 .../common/helpers/ContainerUtils.java          |  328 ++---
 .../common/helpers/DatanodeVersionFile.java     |   95 ++
 .../container/common/helpers/FileUtils.java     |   82 --
 .../container/common/helpers/KeyUtils.java      |  148 ---
 .../common/impl/ChunkLayOutVersion.java         |   98 ++
 .../container/common/impl/ChunkManagerImpl.java |  233 ----
 .../container/common/impl/ContainerData.java    |  477 ++++++++
 .../common/impl/ContainerDataYaml.java          |  277 +++++
 .../impl/ContainerLocationManagerImpl.java      |  158 ---
 .../common/impl/ContainerManagerImpl.java       | 1120 ------------------
 .../container/common/impl/ContainerSet.java     |  255 ++++
 .../common/impl/ContainerStorageLocation.java   |  212 ----
 .../ozone/container/common/impl/Dispatcher.java |  687 -----------
 .../container/common/impl/HddsDispatcher.java   |  208 ++++
 .../container/common/impl/KeyManagerImpl.java   |  204 ----
 .../RandomContainerDeletionChoosingPolicy.java  |    1 -
 ...NOrderedContainerDeletionChoosingPolicy.java |    1 -
 .../common/interfaces/ChunkManager.java         |   73 --
 .../container/common/interfaces/Container.java  |  100 ++
 .../ContainerDeletionChoosingPolicy.java        |    3 +-
 .../common/interfaces/ContainerDispatcher.java  |   14 +
 .../common/interfaces/ContainerManager.java     |  269 -----
 .../container/common/interfaces/Handler.java    |   74 ++
 .../container/common/interfaces/KeyManager.java |   73 --
 .../common/interfaces/VolumeChoosingPolicy.java |   46 +
 .../statemachine/DatanodeStateMachine.java      |    4 +-
 .../background/BlockDeletingService.java        |  246 ----
 .../statemachine/background/package-info.java   |   18 -
 .../DeleteBlocksCommandHandler.java             |   61 +-
 .../states/datanode/RunningDatanodeState.java   |    3 +-
 .../states/endpoint/VersionEndpointTask.java    |   31 +-
 .../server/ratis/ContainerStateMachine.java     |    5 +-
 .../container/common/utils/ContainerCache.java  |    6 +-
 .../container/common/utils/HddsVolumeUtil.java  |  163 +++
 .../container/common/volume/HddsVolume.java     |  342 ++++++
 .../volume/RoundRobinVolumeChoosingPolicy.java  |   83 ++
 .../container/common/volume/VolumeIOStats.java  |  139 +++
 .../container/common/volume/VolumeInfo.java     |  132 +++
 .../container/common/volume/VolumeSet.java      |  357 ++++++
 .../container/common/volume/VolumeUsage.java    |  198 ++++
 .../container/common/volume/package-info.java   |   21 +
 .../container/keyvalue/KeyValueContainer.java   |  483 ++++++++
 .../keyvalue/KeyValueContainerData.java         |  280 +++++
 .../container/keyvalue/KeyValueHandler.java     |  694 +++++++++++
 .../container/keyvalue/helpers/ChunkUtils.java  |  357 ++++++
 .../container/keyvalue/helpers/KeyUtils.java    |  147 +++
 .../helpers/KeyValueContainerLocationUtil.java  |  140 +++
 .../keyvalue/helpers/KeyValueContainerUtil.java |  316 +++++
 .../keyvalue/helpers/SmallFileUtils.java        |   84 ++
 .../keyvalue/helpers/package-info.java          |   21 +
 .../keyvalue/impl/ChunkManagerImpl.java         |  254 ++++
 .../container/keyvalue/impl/KeyManagerImpl.java |  198 ++++
 .../container/keyvalue/impl/package-info.java   |   22 +
 .../keyvalue/interfaces/ChunkManager.java       |   80 ++
 .../keyvalue/interfaces/KeyManager.java         |   76 ++
 .../ozone/container/keyvalue/package-info.java  |   21 +
 .../background/BlockDeletingService.java        |  248 ++++
 .../statemachine/background/package-info.java   |   18 +
 .../container/ozoneimpl/ContainerReader.java    |  177 +++
 .../container/ozoneimpl/OzoneContainer.java     |  396 +++----
 .../hadoop/ozone/protocol/VersionResponse.java  |    4 +
 .../ozone/container/common/SCMTestUtils.java    |   13 +-
 .../ozone/container/common/ScmTestMock.java     |    4 +
 .../common/TestChunkLayOutVersion.java          |   42 +
 .../common/TestDatanodeLayOutVersion.java       |   38 +
 .../common/TestKeyValueContainerData.java       |   85 ++
 .../common/helpers/TestDatanodeVersionFile.java |  134 +++
 .../common/impl/TestContainerDataYaml.java      |  167 +++
 .../container/common/impl/TestContainerSet.java |  179 +++
 .../common/interfaces/TestHandler.java          |   89 ++
 .../container/common/volume/TestHddsVolume.java |  145 +++
 .../TestRoundRobinVolumeChoosingPolicy.java     |  127 ++
 .../container/common/volume/TestVolumeSet.java  |  153 +++
 .../keyvalue/TestChunkManagerImpl.java          |  290 +++++
 .../container/keyvalue/TestKeyManagerImpl.java  |  189 +++
 .../keyvalue/TestKeyValueContainer.java         |  267 +++++
 .../container/keyvalue/TestKeyValueHandler.java |  201 ++++
 .../container/ozoneimpl/TestOzoneContainer.java |  108 ++
 .../testutils/BlockDeletingServiceTestImpl.java |    9 +-
 .../test/resources/additionalfields.container   |   11 +
 .../src/test/resources/incorrect.container      |   10 +
 hadoop-hdds/pom.xml                             |    2 +
 .../hadoop/hdds/scm/node/SCMNodeManager.java    |    4 +
 .../ozone/container/common/TestEndPoint.java    |   20 +-
 .../fsdataset/VolumeChoosingPolicy.java         |    2 +-
 .../ozone/client/io/ChunkGroupInputStream.java  |    6 +-
 .../client/io/OzoneContainerTranslation.java    |   50 -
 .../TestStorageContainerManagerHelper.java      |   11 +-
 .../ozone/container/ContainerTestHelper.java    |   27 +-
 .../common/TestBlockDeletingService.java        |  118 +-
 .../TestContainerDeletionChoosingPolicy.java    |   63 +-
 .../common/impl/TestContainerPersistence.java   |  478 ++++----
 .../commandhandler/TestBlockDeletion.java       |   32 +-
 .../TestCloseContainerByPipeline.java           |    6 +-
 .../TestCloseContainerHandler.java              |    6 +-
 .../container/metrics/TestContainerMetrics.java |   60 +-
 .../container/ozoneimpl/TestOzoneContainer.java |   19 +-
 .../container/server/TestContainerServer.java   |   24 +-
 ...TestGenerateOzoneRequiredConfigurations.java |    5 +-
 .../ozone/om/TestContainerReportWithKeys.java   |   14 +-
 .../ozone/scm/TestContainerSmallFile.java       |    2 +-
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java |   62 +-
 .../hadoop/ozone/web/client/TestKeys.java       |   24 +-
 .../genesis/BenchMarkDatanodeDispatcher.java    |   41 +-
 119 files changed, 10522 insertions(+), 5986 deletions(-)
----------------------------------------------------------------------





[26/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by bh...@apache.org.
HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/061b1685
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/061b1685
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/061b1685

Branch: refs/heads/HDDS-48
Commit: 061b168529a9cd5d6a3a482c890bacdb49186368
Parents: e4bf38c
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Jul 6 12:09:05 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Jul 6 12:09:05 2018 -0700

----------------------------------------------------------------------
 .gitignore                                      |    4 +
 dev-support/bin/ozone-dist-layout-stitching     |    2 +-
 .../hadoop-common/src/main/conf/hadoop-env.sh   |    6 +-
 .../src/main/compose/ozone/docker-compose.yaml  |    6 +-
 .../src/main/compose/ozone/docker-config        |    2 +-
 .../src/main/compose/ozoneperf/README.md        |    4 +-
 .../main/compose/ozoneperf/docker-compose.yaml  |    6 +-
 .../src/main/compose/ozoneperf/docker-config    |    2 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |    2 +-
 .../org/apache/hadoop/ozone/OzoneConsts.java    |   22 +-
 .../org/apache/hadoop/ozone/common/Storage.java |    6 +-
 .../main/proto/ScmBlockLocationProtocol.proto   |   10 +-
 hadoop-hdds/common/src/main/proto/hdds.proto    |    6 +-
 .../common/src/main/resources/ozone-default.xml |  120 +-
 .../src/main/resources/webapps/static/ozone.js  |    4 +-
 .../webapps/static/templates/config.html        |    4 +-
 .../hadoop/hdds/scm/block/DeletedBlockLog.java  |    2 +-
 .../scm/container/states/ContainerStateMap.java |    6 +-
 .../hadoop/hdds/scm/node/CommandQueue.java      |    2 +-
 .../hdds/scm/server/SCMBlockProtocolServer.java |    2 +-
 .../hadoop/hdds/scm/cli/OzoneBaseCLI.java       |    2 +-
 .../src/test/acceptance/basic/basic.robot       |    6 +-
 .../test/acceptance/basic/docker-compose.yaml   |    8 +-
 .../src/test/acceptance/basic/docker-config     |    4 +-
 .../src/test/acceptance/basic/ozone-shell.robot |   18 +-
 .../src/test/acceptance/commonlib.robot         |    4 +-
 .../test/acceptance/ozonefs/docker-compose.yaml |    8 +-
 .../src/test/acceptance/ozonefs/docker-config   |    4 +-
 .../src/test/acceptance/ozonefs/ozonefs.robot   |    6 +-
 .../apache/hadoop/ozone/client/BucketArgs.java  |    4 +-
 .../hadoop/ozone/client/OzoneClientFactory.java |   86 +-
 .../apache/hadoop/ozone/client/OzoneKey.java    |    2 +-
 .../apache/hadoop/ozone/client/VolumeArgs.java  |    4 +-
 .../ozone/client/io/ChunkGroupInputStream.java  |   24 +-
 .../ozone/client/io/ChunkGroupOutputStream.java |   54 +-
 .../client/rest/DefaultRestServerSelector.java  |    2 +-
 .../hadoop/ozone/client/rest/RestClient.java    |   15 +-
 .../ozone/client/rest/RestServerSelector.java   |    2 +-
 .../hadoop/ozone/client/rpc/RpcClient.java      |  142 +-
 .../ozone/client/TestHddsClientUtils.java       |   24 +-
 hadoop-ozone/common/pom.xml                     |    2 +-
 hadoop-ozone/common/src/main/bin/ozone          |    9 +-
 hadoop-ozone/common/src/main/bin/start-ozone.sh |   16 +-
 hadoop-ozone/common/src/main/bin/stop-ozone.sh  |   16 +-
 .../java/org/apache/hadoop/ozone/KsmUtils.java  |   95 --
 .../java/org/apache/hadoop/ozone/OmUtils.java   |   94 ++
 .../apache/hadoop/ozone/freon/OzoneGetConf.java |   16 +-
 .../apache/hadoop/ozone/ksm/KSMConfigKeys.java  |   81 --
 .../hadoop/ozone/ksm/helpers/KsmBucketArgs.java |  233 ---
 .../hadoop/ozone/ksm/helpers/KsmBucketInfo.java |  235 ---
 .../hadoop/ozone/ksm/helpers/KsmKeyArgs.java    |  119 --
 .../hadoop/ozone/ksm/helpers/KsmKeyInfo.java    |  277 ----
 .../ozone/ksm/helpers/KsmKeyLocationInfo.java   |  129 --
 .../ksm/helpers/KsmKeyLocationInfoGroup.java    |  118 --
 .../ozone/ksm/helpers/KsmOzoneAclMap.java       |  110 --
 .../hadoop/ozone/ksm/helpers/KsmVolumeArgs.java |  223 ---
 .../ozone/ksm/helpers/OpenKeySession.java       |   50 -
 .../hadoop/ozone/ksm/helpers/ServiceInfo.java   |  237 ---
 .../hadoop/ozone/ksm/helpers/VolumeArgs.java    |  140 --
 .../hadoop/ozone/ksm/helpers/package-info.java  |   18 -
 .../apache/hadoop/ozone/ksm/package-info.java   |   21 -
 .../ksm/protocol/KeySpaceManagerProtocol.java   |  252 ----
 .../hadoop/ozone/ksm/protocol/package-info.java |   19 -
 ...ceManagerProtocolClientSideTranslatorPB.java |  769 ----------
 .../protocolPB/KeySpaceManagerProtocolPB.java   |   34 -
 .../ozone/ksm/protocolPB/package-info.java      |   19 -
 .../apache/hadoop/ozone/om/OMConfigKeys.java    |   81 ++
 .../hadoop/ozone/om/helpers/OmBucketArgs.java   |  233 +++
 .../hadoop/ozone/om/helpers/OmBucketInfo.java   |  235 +++
 .../hadoop/ozone/om/helpers/OmKeyArgs.java      |  119 ++
 .../hadoop/ozone/om/helpers/OmKeyInfo.java      |  277 ++++
 .../ozone/om/helpers/OmKeyLocationInfo.java     |  129 ++
 .../om/helpers/OmKeyLocationInfoGroup.java      |  118 ++
 .../hadoop/ozone/om/helpers/OmOzoneAclMap.java  |  110 ++
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java   |  223 +++
 .../hadoop/ozone/om/helpers/OpenKeySession.java |   50 +
 .../hadoop/ozone/om/helpers/ServiceInfo.java    |  237 +++
 .../hadoop/ozone/om/helpers/VolumeArgs.java     |  140 ++
 .../hadoop/ozone/om/helpers/package-info.java   |   18 +
 .../apache/hadoop/ozone/om/package-info.java    |   21 +
 .../ozone/om/protocol/OzoneManagerProtocol.java |  252 ++++
 .../hadoop/ozone/om/protocol/package-info.java  |   19 +
 ...neManagerProtocolClientSideTranslatorPB.java |  769 ++++++++++
 .../om/protocolPB/OzoneManagerProtocolPB.java   |   34 +
 .../ozone/om/protocolPB/package-info.java       |   19 +
 .../hadoop/ozone/protocolPB/KSMPBHelper.java    |  113 --
 .../hadoop/ozone/protocolPB/OMPBHelper.java     |  113 ++
 .../main/proto/KeySpaceManagerProtocol.proto    |  474 ------
 .../src/main/proto/OzoneManagerProtocol.proto   |  480 +++++++
 hadoop-ozone/docs/content/GettingStarted.md     |   18 +-
 hadoop-ozone/docs/content/Metrics.md            |   10 +-
 hadoop-ozone/docs/content/_index.md             |   12 +-
 hadoop-ozone/docs/static/OzoneOverview.svg      |    2 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |   24 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |   66 +-
 .../ozone/TestOzoneConfigurationFields.java     |    4 +-
 .../ozone/TestStorageContainerManager.java      |   18 +-
 .../TestStorageContainerManagerHelper.java      |   12 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |    6 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java    |   18 +-
 .../commandhandler/TestBlockDeletion.java       |   45 +-
 .../TestCloseContainerByPipeline.java           |   35 +-
 .../TestCloseContainerHandler.java              |   14 +-
 .../ozone/ksm/TestContainerReportWithKeys.java  |  143 --
 .../apache/hadoop/ozone/ksm/TestKSMMetrcis.java |  306 ----
 .../apache/hadoop/ozone/ksm/TestKSMSQLCli.java  |  284 ----
 .../hadoop/ozone/ksm/TestKeySpaceManager.java   | 1350 ------------------
 .../ksm/TestKeySpaceManagerRestInterface.java   |  135 --
 .../ozone/ksm/TestKsmBlockVersioning.java       |  253 ----
 .../ksm/TestMultipleContainerReadWrite.java     |  215 ---
 .../ozone/om/TestContainerReportWithKeys.java   |  143 ++
 .../om/TestMultipleContainerReadWrite.java      |  215 +++
 .../hadoop/ozone/om/TestOmBlockVersioning.java  |  253 ++++
 .../apache/hadoop/ozone/om/TestOmMetrics.java   |  313 ++++
 .../apache/hadoop/ozone/om/TestOmSQLCli.java    |  284 ++++
 .../hadoop/ozone/om/TestOzoneManager.java       | 1349 +++++++++++++++++
 .../ozone/om/TestOzoneManagerRestInterface.java |  135 ++
 .../hadoop/ozone/ozShell/TestOzoneShell.java    |   14 +-
 .../hadoop/ozone/scm/TestContainerSQLCli.java   |    3 +-
 .../ozone/web/TestDistributedOzoneVolumes.java  |   12 +-
 .../hadoop/ozone/web/client/TestKeys.java       |   58 +-
 .../src/test/resources/webapps/ksm/.gitkeep     |   15 -
 .../resources/webapps/ozoneManager/.gitkeep     |   15 +
 .../server/datanode/ObjectStoreHandler.java     |   33 +-
 .../ozone/web/handlers/KeyProcessTemplate.java  |    4 +-
 .../web/handlers/VolumeProcessTemplate.java     |    4 +-
 .../web/storage/DistributedStorageHandler.java  |  153 +-
 .../apache/hadoop/ozone/ksm/BucketManager.java  |   79 -
 .../hadoop/ozone/ksm/BucketManagerImpl.java     |  315 ----
 .../org/apache/hadoop/ozone/ksm/KSMMXBean.java  |   31 -
 .../hadoop/ozone/ksm/KSMMetadataManager.java    |  253 ----
 .../ozone/ksm/KSMMetadataManagerImpl.java       |  526 -------
 .../org/apache/hadoop/ozone/ksm/KSMMetrics.java |  459 ------
 .../org/apache/hadoop/ozone/ksm/KSMStorage.java |   90 --
 .../hadoop/ozone/ksm/KeyDeletingService.java    |  142 --
 .../org/apache/hadoop/ozone/ksm/KeyManager.java |  175 ---
 .../apache/hadoop/ozone/ksm/KeyManagerImpl.java |  566 --------
 .../hadoop/ozone/ksm/KeySpaceManager.java       |  912 ------------
 .../ozone/ksm/KeySpaceManagerHttpServer.java    |   78 -
 .../hadoop/ozone/ksm/OpenKeyCleanupService.java |  117 --
 .../ozone/ksm/ServiceListJSONServlet.java       |  103 --
 .../apache/hadoop/ozone/ksm/VolumeManager.java  |  100 --
 .../hadoop/ozone/ksm/VolumeManagerImpl.java     |  391 -----
 .../ozone/ksm/exceptions/KSMException.java      |  118 --
 .../ozone/ksm/exceptions/package-info.java      |   19 -
 .../apache/hadoop/ozone/ksm/package-info.java   |   21 -
 .../apache/hadoop/ozone/om/BucketManager.java   |   79 +
 .../hadoop/ozone/om/BucketManagerImpl.java      |  315 ++++
 .../hadoop/ozone/om/KeyDeletingService.java     |  142 ++
 .../org/apache/hadoop/ozone/om/KeyManager.java  |  175 +++
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |  566 ++++++++
 .../org/apache/hadoop/ozone/om/OMMXBean.java    |   31 +
 .../hadoop/ozone/om/OMMetadataManager.java      |  253 ++++
 .../org/apache/hadoop/ozone/om/OMMetrics.java   |  459 ++++++
 .../org/apache/hadoop/ozone/om/OMStorage.java   |   90 ++
 .../hadoop/ozone/om/OmMetadataManagerImpl.java  |  526 +++++++
 .../hadoop/ozone/om/OpenKeyCleanupService.java  |  117 ++
 .../apache/hadoop/ozone/om/OzoneManager.java    |  911 ++++++++++++
 .../hadoop/ozone/om/OzoneManagerHttpServer.java |   78 +
 .../hadoop/ozone/om/ServiceListJSONServlet.java |  103 ++
 .../apache/hadoop/ozone/om/VolumeManager.java   |  100 ++
 .../hadoop/ozone/om/VolumeManagerImpl.java      |  390 +++++
 .../hadoop/ozone/om/exceptions/OMException.java |  118 ++
 .../ozone/om/exceptions/package-info.java       |   19 +
 .../apache/hadoop/ozone/om/package-info.java    |   21 +
 ...ceManagerProtocolServerSideTranslatorPB.java |  559 --------
 ...neManagerProtocolServerSideTranslatorPB.java |  571 ++++++++
 .../hadoop/ozone/protocolPB/package-info.java   |    2 +-
 .../src/main/webapps/ksm/index.html             |   70 -
 .../src/main/webapps/ksm/ksm-metrics.html       |   44 -
 .../ozone-manager/src/main/webapps/ksm/ksm.js   |  110 --
 .../ozone-manager/src/main/webapps/ksm/main.css |   23 -
 .../src/main/webapps/ksm/main.html              |   18 -
 .../src/main/webapps/ozoneManager/index.html    |   70 +
 .../src/main/webapps/ozoneManager/main.css      |   23 +
 .../src/main/webapps/ozoneManager/main.html     |   18 +
 .../main/webapps/ozoneManager/om-metrics.html   |   44 +
 .../main/webapps/ozoneManager/ozoneManager.js   |  110 ++
 .../hadoop/ozone/ksm/TestBucketManagerImpl.java |  395 -----
 .../hadoop/ozone/ksm/TestChunkStreams.java      |  234 ---
 .../ksm/TestKeySpaceManagerHttpServer.java      |  141 --
 .../apache/hadoop/ozone/ksm/package-info.java   |   21 -
 .../hadoop/ozone/om/TestBucketManagerImpl.java  |  394 +++++
 .../hadoop/ozone/om/TestChunkStreams.java       |  234 +++
 .../ozone/om/TestOzoneManagerHttpServer.java    |  141 ++
 .../apache/hadoop/ozone/om/package-info.java    |   21 +
 .../hadoop/fs/ozone/contract/OzoneContract.java |    4 +-
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |   48 +-
 188 files changed, 13252 insertions(+), 13237 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 428950b..d555036 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,6 +50,10 @@ patchprocess/
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package-lock.json
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn-error.log
 
+# Ignore files generated by HDDS acceptance tests.
+hadoop-ozone/acceptance-test/docker-compose.log
+hadoop-ozone/acceptance-test/junit-results.xml
+
 #robotframework outputs
 log.html
 output.xml

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/dev-support/bin/ozone-dist-layout-stitching
----------------------------------------------------------------------
diff --git a/dev-support/bin/ozone-dist-layout-stitching b/dev-support/bin/ozone-dist-layout-stitching
index be330d5..c30a37d 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -148,7 +148,7 @@ run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}"
 mkdir -p "./share/hadoop/ozonefs"
 cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" "./share/hadoop/ozonefs/hadoop-ozone-filesystem.jar"
 # Optional documentation, could be missing
-cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ksm/
+cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ozoneManager/
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdds/webapps/scm/
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index 6573a81..3826f67 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -404,13 +404,13 @@ esac
 # export HDFS_DFSROUTER_OPTS=""
 
 ###
-# HDFS Key Space Manager specific parameters
+# Ozone Manager specific parameters
 ###
-# Specify the JVM options to be used when starting the HDFS Key Space Manager.
+# Specify the JVM options to be used when starting the Ozone Manager.
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
-# export HDFS_KSM_OPTS=""
+# export HDFS_OM_OPTS=""
 
 ###
 # HDFS StorageContainerManager specific parameters

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
index 512c649..bb5e8dd 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
@@ -25,17 +25,17 @@ services:
       command: ["/opt/hadoop/bin/ozone","datanode"]
       env_file:
         - ./docker-config
-   ksm:
+   ozoneManager:
       image: apache/hadoop-runner
       volumes:
          - ../../ozone:/opt/hadoop
       ports:
          - 9874:9874
       environment:
-         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+         ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","ksm"]
+      command: ["/opt/hadoop/bin/ozone","om"]
    scm:
       image: apache/hadoop-runner
       volumes:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-dist/src/main/compose/ozone/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config
index 632f870..50abb18 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-OZONE-SITE.XML_ozone.ksm.address=ksm
+OZONE-SITE.XML_ozone.om.address=ozoneManager
 OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.enabled=True
 OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-dist/src/main/compose/ozoneperf/README.md
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozoneperf/README.md b/hadoop-dist/src/main/compose/ozoneperf/README.md
index a78f208..527ff41 100644
--- a/hadoop-dist/src/main/compose/ozoneperf/README.md
+++ b/hadoop-dist/src/main/compose/ozoneperf/README.md
@@ -67,7 +67,7 @@ http://localhost:9090/graph
 Example queries:
 
 ```
-Hadoop_KeySpaceManager_NumKeyCommits
-rate(Hadoop_KeySpaceManager_NumKeyCommits[10m])
+Hadoop_OzoneManager_NumKeyCommits
+rate(Hadoop_OzoneManager_NumKeyCommits[10m])
 rate(Hadoop_Ozone_BYTES_WRITTEN[10m])
 ```

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
index 3233c11..6d1d9ca 100644
--- a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
@@ -26,7 +26,7 @@ services:
       command: ["/opt/hadoop/bin/ozone","datanode"]
       env_file:
         - ./docker-config
-   ksm:
+   ozoneManager:
       image: apache/hadoop-runner
       volumes:
         - ../../ozone:/opt/hadoop
@@ -34,10 +34,10 @@ services:
       ports:
          - 9874:9874
       environment:
-         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+         ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","ksm"]
+      command: ["/opt/hadoop/bin/ozone","om"]
    scm:
       image: apache/hadoop-runner
       volumes:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-dist/src/main/compose/ozoneperf/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-config b/hadoop-dist/src/main/compose/ozoneperf/docker-config
index 2be22a7..2539950 100644
--- a/hadoop-dist/src/main/compose/ozoneperf/docker-config
+++ b/hadoop-dist/src/main/compose/ozoneperf/docker-config
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-OZONE-SITE.XML_ozone.ksm.address=ksm
+OZONE-SITE.XML_ozone.om.address=ozoneManager
 OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.enabled=True
 OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index ad326dc..4f1b1c8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -94,7 +94,7 @@ public final class ScmConfigKeys {
       "ozone.scm.datanode.port";
   public static final int OZONE_SCM_DATANODE_PORT_DEFAULT = 9861;
 
-  // OZONE_KSM_PORT_DEFAULT = 9862
+  // OZONE_OM_PORT_DEFAULT = 9862
   public static final String OZONE_SCM_BLOCK_CLIENT_PORT_KEY =
       "ozone.scm.block.client.port";
   public static final int OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT = 9863;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 08a5ffd..4fad5d8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -93,7 +93,7 @@ public final class OzoneConsts {
   public static final String BLOCK_DB = "block.db";
   public static final String OPEN_CONTAINERS_DB = "openContainers.db";
   public static final String DELETED_BLOCK_DB = "deletedBlock.db";
-  public static final String KSM_DB_NAME = "ksm.db";
+  public static final String OM_DB_NAME = "om.db";
 
   /**
    * Supports Bucket Versioning.
@@ -119,13 +119,13 @@ public final class OzoneConsts {
   public static final String OPEN_KEY_ID_DELIMINATOR = "#";
 
   /**
-   * KSM LevelDB prefixes.
+   * OM LevelDB prefixes.
    *
-   * KSM DB stores metadata as KV pairs with certain prefixes,
+   * OM DB stores metadata as KV pairs with certain prefixes,
    * prefix is used to improve the performance to get related
    * metadata.
    *
-   * KSM DB Schema:
+   * OM DB Schema:
    *  ----------------------------------------------------------
    *  |  KEY                                     |     VALUE   |
    *  ----------------------------------------------------------
@@ -140,13 +140,13 @@ public final class OzoneConsts {
    *  | #deleting#/volumeName/bucketName/keyName |  KeyInfo    |
    *  ----------------------------------------------------------
    */
-  public static final String KSM_VOLUME_PREFIX = "/#";
-  public static final String KSM_BUCKET_PREFIX = "/#";
-  public static final String KSM_KEY_PREFIX = "/";
-  public static final String KSM_USER_PREFIX = "$";
+  public static final String OM_VOLUME_PREFIX = "/#";
+  public static final String OM_BUCKET_PREFIX = "/#";
+  public static final String OM_KEY_PREFIX = "/";
+  public static final String OM_USER_PREFIX = "$";
 
   /**
-   * Max KSM Quota size of 1024 PB.
+   * Max OM Quota size of 1024 PB.
    */
   public static final long MAX_QUOTA_IN_BYTES = 1024L * 1024 * TB;
 
@@ -168,9 +168,9 @@ public final class OzoneConsts {
   public static final int INVALID_PORT = -1;
 
 
-  // The ServiceListJSONServlet context attribute where KeySpaceManager
+  // The ServiceListJSONServlet context attribute where OzoneManager
   // instance gets stored.
-  public static final String KSM_CONTEXT_ATTRIBUTE = "ozone.ksm";
+  public static final String OM_CONTEXT_ATTRIBUTE = "ozone.om";
 
   private OzoneConsts() {
     // Never Constructed

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
index fb30d92..a32d559 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
@@ -38,7 +38,7 @@ import java.util.Properties;
  * Local storage information is stored in a separate file VERSION.
  * It contains type of the node,
  * the storage layout version, the SCM id, and
- * the KSM/SCM state creation time.
+ * the OM/SCM state creation time.
  *
  */
 @InterfaceAudience.Private
@@ -127,7 +127,7 @@ public abstract class Storage {
   abstract protected Properties getNodeProperties();
 
   /**
-   * Sets the Node properties spaecific to KSM/SCM.
+   * Sets the Node properties spaecific to OM/SCM.
    */
   private void setNodeProperties() {
     Properties nodeProperties = getNodeProperties();
@@ -152,7 +152,7 @@ public abstract class Storage {
    * File {@code VERSION} contains the following fields:
    * <ol>
    * <li>node type</li>
-   * <li>KSM/SCM state creation time</li>
+   * <li>OM/SCM state creation time</li>
    * <li>other fields specific for this node type</li>
    * </ol>
    * The version file is always written last during storage directory updates.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
index 7bea82a..53f408a 100644
--- a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
@@ -46,7 +46,7 @@ message AllocateScmBlockRequestProto {
 }
 
 /**
- * A delete key request sent by KSM to SCM, it contains
+ * A delete key request sent by OM to SCM, it contains
  * multiple number of keys (and their blocks).
  */
 message DeleteScmKeyBlocksRequestProto {
@@ -56,9 +56,9 @@ message DeleteScmKeyBlocksRequestProto {
 /**
  * A object key and all its associated blocks.
  * We need to encapsulate object key name plus the blocks in this potocol
- * because SCM needs to response KSM with the keys it has deleted.
+ * because SCM needs to response OM with the keys it has deleted.
  * If the response only contains blocks, it will be very expensive for
- * KSM to figure out what keys have been deleted.
+ * OM to figure out what keys have been deleted.
  */
 message KeyBlocks {
   required string key = 1;
@@ -66,7 +66,7 @@ message KeyBlocks {
 }
 
 /**
- * A delete key response from SCM to KSM, it contains multiple child-results.
+ * A delete key response from SCM to OM, it contains multiple child-results.
  * Each child-result represents a key deletion result, only if all blocks of
  * a key are successfully deleted, this key result is considered as succeed.
  */
@@ -111,7 +111,7 @@ message AllocateScmBlockResponseProto {
 }
 
 /**
- * Protocol used from KeySpaceManager to StorageContainerManager.
+ * Protocol used from OzoneManager to StorageContainerManager.
  * See request and response messages for details of the RPC calls.
  */
 service ScmBlockLocationProtocolService {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/common/src/main/proto/hdds.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto
index b9def2a..a5ce994 100644
--- a/hadoop-hdds/common/src/main/proto/hdds.proto
+++ b/hadoop-hdds/common/src/main/proto/hdds.proto
@@ -58,9 +58,9 @@ message KeyValue {
  * Type of the node.
  */
 enum NodeType {
-    KSM = 1;
-    SCM = 2;
-    DATANODE = 3;
+    OM = 1;         // Ozone Manager
+    SCM = 2;        // Storage Container Manager
+    DATANODE = 3;   // DataNode
 }
 
 // Should we rename NodeState to DatanodeState?

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 568d267..530fb09 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -21,7 +21,7 @@
 <!-- there.  If ozone-site.xml does not already exist, create it.      -->
 
 <!--Tags supported are OZONE, CBLOCK, MANAGEMENT, SECURITY, PERFORMANCE,   -->
-<!--DEBUG, CLIENT, SERVER, KSM, SCM, CRITICAL, RATIS, CONTAINER, REQUIRED, -->
+<!--DEBUG, CLIENT, SERVER, OM, SCM, CRITICAL, RATIS, CONTAINER, REQUIRED, -->
 <!--REST, STORAGE, PIPELINE, STANDALONE                                    -->
 
 <configuration>
@@ -254,122 +254,122 @@
     <description>
       Tells ozone which storage handler to use. The possible values are:
       distributed - The Ozone distributed storage handler, which speaks to
-      KSM/SCM on the backend and provides REST services to clients.
+      OM/SCM on the backend and provides REST services to clients.
       local - Local Storage handler strictly for testing - To be removed.
     </description>
   </property>
   <property>
     <name>ozone.key.deleting.limit.per.task</name>
     <value>1000</value>
-    <tag>KSM, PERFORMANCE</tag>
+    <tag>OM, PERFORMANCE</tag>
     <description>
       A maximum number of keys to be scanned by key deleting service
-      per time interval in KSM. Those keys are sent to delete metadata and
+      per time interval in OM. Those keys are sent to delete metadata and
       generate transactions in SCM for next async deletion between SCM
       and DataNode.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.address</name>
+    <name>ozone.om.address</name>
     <value/>
-    <tag>KSM, REQUIRED</tag>
+    <tag>OM, REQUIRED</tag>
     <description>
-      The address of the Ozone KSM service. This allows clients to discover
-      the KSMs address.
+      The address of the Ozone OM service. This allows clients to discover
+      the address of the OM.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.group.rights</name>
+    <name>ozone.om.group.rights</name>
     <value>READ_WRITE</value>
-    <tag>KSM, SECURITY</tag>
+    <tag>OM, SECURITY</tag>
     <description>
-      Default group permissions in Ozone KSM.
+      Default group permissions in Ozone OM.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.handler.count.key</name>
+    <name>ozone.om.handler.count.key</name>
     <value>20</value>
-    <tag>KSM, PERFORMANCE</tag>
+    <tag>OM, PERFORMANCE</tag>
     <description>
-      The number of RPC handler threads for KSM service endpoints.
+      The number of RPC handler threads for OM service endpoints.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.http-address</name>
+    <name>ozone.om.http-address</name>
     <value>0.0.0.0:9874</value>
-    <tag>KSM, MANAGEMENT</tag>
+    <tag>OM, MANAGEMENT</tag>
     <description>
-      The address and the base port where the KSM web UI will listen on.
+      The address and the base port where the OM web UI will listen on.
 
       If the port is 0, then the server will start on a free port. However, it
       is best to specify a well-known port, so it is easy to connect and see
-      the KSM management UI.
+      the OM management UI.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.http-bind-host</name>
+    <name>ozone.om.http-bind-host</name>
     <value>0.0.0.0</value>
-    <tag>KSM, MANAGEMENT</tag>
+    <tag>OM, MANAGEMENT</tag>
     <description>
-      The actual address the KSM web server will bind to. If this optional
+      The actual address the OM web server will bind to. If this optional
       the address is set, it overrides only the hostname portion of
-      ozone.ksm.http-address.
+      ozone.om.http-address.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.http.enabled</name>
+    <name>ozone.om.http.enabled</name>
     <value>true</value>
-    <tag>KSM, MANAGEMENT</tag>
+    <tag>OM, MANAGEMENT</tag>
     <description>
-      Property to enable or disable KSM web user interface.
+      Property to enable or disable OM web user interface.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.https-address</name>
+    <name>ozone.om.https-address</name>
     <value>0.0.0.0:9875</value>
-    <tag>KSM, MANAGEMENT, SECURITY</tag>
+    <tag>OM, MANAGEMENT, SECURITY</tag>
     <description>
-      The address and the base port where the KSM web UI will listen
+      The address and the base port where the OM web UI will listen
       on using HTTPS.
       If the port is 0 then the server will start on a free port.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.https-bind-host</name>
+    <name>ozone.om.https-bind-host</name>
     <value>0.0.0.0</value>
-    <tag>KSM, MANAGEMENT, SECURITY</tag>
+    <tag>OM, MANAGEMENT, SECURITY</tag>
     <description>
-      The actual address the KSM web server will bind to using HTTPS.
+      The actual address the OM web server will bind to using HTTPS.
       If this optional address is set, it overrides only the hostname portion of
-      ozone.ksm.http-address.
+      ozone.om.http-address.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.keytab.file</name>
+    <name>ozone.om.keytab.file</name>
     <value/>
-    <tag>KSM, SECURITY</tag>
+    <tag>OM, SECURITY</tag>
     <description>
-      The keytab file for Kerberos authentication in KSM.
+      The keytab file for Kerberos authentication in OM.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.db.cache.size.mb</name>
+    <name>ozone.om.db.cache.size.mb</name>
     <value>128</value>
-    <tag>KSM, PERFORMANCE</tag>
+    <tag>OM, PERFORMANCE</tag>
     <description>
-      The size of KSM DB cache in MB that used for caching files.
+      The size of OM DB cache in MB that used for caching files.
       This value is set to an abnormally low value in the default configuration.
       That is to make unit testing easy. Generally, this value should be set to
       something like 16GB or more, if you intend to use Ozone at scale.
 
-      A large value for this key allows a proportionally larger amount of KSM
-      metadata to be cached in memory. This makes KSM operations faster.
+      A large value for this key allows a proportionally larger amount of OM
+      metadata to be cached in memory. This makes OM operations faster.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.user.max.volume</name>
+    <name>ozone.om.user.max.volume</name>
     <value>1024</value>
-    <tag>KSM, MANAGEMENT</tag>
+    <tag>OM, MANAGEMENT</tag>
     <description>
       The maximum number of volumes a user can have on a cluster.Increasing or
       decreasing this number has no real impact on ozone cluster. This is
@@ -379,11 +379,11 @@
     </description>
   </property>
   <property>
-    <name>ozone.ksm.user.rights</name>
+    <name>ozone.om.user.rights</name>
     <value>READ_WRITE</value>
-    <tag>KSM, SECURITY</tag>
+    <tag>OM, SECURITY</tag>
     <description>
-      Default user permissions used in KSM.
+      Default user permissions used in OM.
     </description>
   </property>
   <property>
@@ -393,20 +393,20 @@
     <description>
       This is used only for testing purposes. This value is used by the local
       storage handler to simulate a REST backend. This is useful only when
-      debugging the REST front end independent of KSM and SCM. To be removed.
+      debugging the REST front end independent of OM and SCM. To be removed.
     </description>
   </property>
   <property>
     <name>ozone.metadata.dirs</name>
     <value/>
-    <tag>OZONE, KSM, SCM, CONTAINER, REQUIRED, STORAGE</tag>
+    <tag>OZONE, OM, SCM, CONTAINER, REQUIRED, STORAGE</tag>
     <description>
-      Ozone metadata is shared among KSM, which acts as the namespace
+      Ozone metadata is shared among OM, which acts as the namespace
       manager for ozone, SCM which acts as the block manager and data nodes
       which maintain the name of the key(Key Name and BlockIDs). This
       replicated and distributed metadata store is maintained under the
       directory pointed by this key. Since metadata can be I/O intensive, at
-      least on KSM and SCM we recommend having SSDs. If you have the luxury
+      least on OM and SCM we recommend having SSDs. If you have the luxury
       of mapping this path to SSDs on all machines in the cluster, that will
       be excellent.
 
@@ -417,10 +417,10 @@
   <property>
     <name>ozone.metastore.impl</name>
     <value>RocksDB</value>
-    <tag>OZONE, KSM, SCM, CONTAINER, STORAGE</tag>
+    <tag>OZONE, OM, SCM, CONTAINER, STORAGE</tag>
     <description>
       Ozone metadata store implementation. Ozone metadata are well
-      distributed to multiple services such as ksm, scm. They are stored in
+      distributed to multiple services such as ozoneManager, scm. They are stored in
       some local key-value databases. This property determines which database
       library to use. Supported value is either LevelDB or RocksDB.
     </description>
@@ -429,7 +429,7 @@
   <property>
     <name>ozone.metastore.rocksdb.statistics</name>
     <value>ALL</value>
-    <tag>OZONE, KSM, SCM, STORAGE, PERFORMANCE</tag>
+    <tag>OZONE, OM, SCM, STORAGE, PERFORMANCE</tag>
     <description>
       The statistics level of the rocksdb store. If you use any value from
       org.rocksdb.StatsLevel (eg. ALL or EXCEPT_DETAILED_TIMERS), the rocksdb
@@ -672,7 +672,7 @@
       The heartbeat interval from a data node to SCM. Yes,
       it is not three but 30, since most data nodes will heart beating via Ratis
       heartbeats. If a client is not able to talk to a data node, it will notify
-      KSM/SCM eventually. So a 30 second HB seems to work. This assumes that
+      OM/SCM eventually. So a 30 second HB seems to work. This assumes that
       replication strategy used is Ratis if not, this value should be set to
       something smaller like 3 seconds.
     </description>
@@ -808,7 +808,7 @@
     <value/>
     <tag>OZONE, SECURITY</tag>
     <description>
-      The server principal used by the SCM and KSM for web UI SPNEGO
+      The server principal used by the SCM and OM for web UI SPNEGO
       authentication when Kerberos security is enabled. This is typically set to
       HTTP/_HOST@REALM.TLD The SPNEGO server principal begins with the prefix
       HTTP/ by convention.
@@ -867,9 +867,9 @@
   <property>
     <name>ozone.key.preallocation.maxsize</name>
     <value>134217728</value>
-    <tag>OZONE, KSM, PERFORMANCE</tag>
+    <tag>OZONE, OM, PERFORMANCE</tag>
     <description>
-      When a new key write request is sent to KSM, if a size is requested, at most
+      When a new key write request is sent to OM, if a size is requested, at most
       128MB of size is allocated at request time. If client needs more space for the
       write, separate block allocation requests will be made.
     </description>
@@ -938,7 +938,7 @@
   <property>
     <name>ozone.open.key.cleanup.service.interval.seconds</name>
     <value>86400</value>
-    <tag>OZONE, KSM, PERFORMANCE</tag>
+    <tag>OZONE, OM, PERFORMANCE</tag>
     <description>
       A background job periodically checks open key entries and delete the expired ones. This entry controls the
       interval of this cleanup check.
@@ -948,7 +948,7 @@
   <property>
     <name>ozone.open.key.expire.threshold</name>
     <value>86400</value>
-    <tag>OZONE, KSM, PERFORMANCE</tag>
+    <tag>OZONE, OM, PERFORMANCE</tag>
     <description>
       Controls how long an open key operation is considered active. Specifically, if a key
       has been open longer than the value of this config entry, that open key is considered as
@@ -958,12 +958,12 @@
 
   <property>
     <name>hadoop.tags.custom</name>
-    <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,KSM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE</value>
+    <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE</value>
   </property>
 
   <property>
     <name>ozone.tags.system</name>
-    <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,KSM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE</value>
+    <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE</value>
   </property>
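
The renamed keys and tags above are read through the standard Hadoop Configuration API. A minimal Java sketch of how a client might pick them up (the OmConfigExample class name and the host:port value are illustrative, not part of this patch; the property names and the RocksDB default are taken from ozone-default.xml above):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;

public class OmConfigExample {
  public static void main(String[] args) {
    // OzoneConfiguration loads ozone-default.xml / ozone-site.xml from the classpath.
    OzoneConfiguration conf = new OzoneConfiguration();

    // The renamed OM address key (formerly ozone.ksm.address); the host:port is illustrative.
    conf.set(OZONE_OM_ADDRESS_KEY, "ozoneManager:9862");

    // Metadata store implementation, falling back to the RocksDB default shown above.
    String metastore = conf.get("ozone.metastore.impl", "RocksDB");

    System.out.println("OM address : " + conf.get(OZONE_OM_ADDRESS_KEY));
    System.out.println("Metastore  : " + metastore);
  }
}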
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
index 411438a..c2ed2ad 100644
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
+++ b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
@@ -270,7 +270,7 @@
       $http.get("conf?cmd=getOzoneTags")
         .then(function(response) {
           ctrl.tags = response.data;
-          var excludedTags = ['CBLOCK', 'KSM', 'SCM'];
+          var excludedTags = ['CBLOCK', 'OM', 'SCM'];
           for (var i = 0; i < excludedTags.length; i++) {
             var idx = ctrl.tags.indexOf(excludedTags[i]);
             // Remove CBLOCK related properties
@@ -302,7 +302,7 @@
       }
 
       ctrl.loadAll = function() {
-        $http.get("conf?cmd=getPropertyByTag&tags=KSM,SCM," + ctrl.tags)
+        $http.get("conf?cmd=getPropertyByTag&tags=OM,SCM," + ctrl.tags)
           .then(function(response) {
 
             ctrl.convertToArray(response.data);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html
index 6825750..b52f653 100644
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html
+++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html
@@ -27,8 +27,8 @@
          ng-click="$ctrl.switchto('All')">All
       </a>
       <a class="btn"
-         ng-class="$ctrl.allSelected('KSM') ? 'btn-primary' :'btn-secondary'"
-         ng-click="$ctrl.switchto('KSM')">KSM</a>
+         ng-class="$ctrl.allSelected('OM') ? 'btn-primary' :'btn-secondary'"
+         ng-click="$ctrl.switchto('OM')">OM</a>
       <a class="btn"
          ng-class="$ctrl.allSelected('SCM') ? 'btn-primary' :'btn-secondary'"
          ng-click="$ctrl.switchto('SCM')">SCM</a>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
index 4f4c755..28103be 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
@@ -28,7 +28,7 @@ import java.util.Map;
 /**
  * The DeletedBlockLog is a persisted log in SCM to keep tracking
  * container blocks which are under deletion. It maintains info
- * about under-deletion container blocks that notified by KSM,
+ * about under-deletion container blocks that are notified by OM,
  * and the state of how they are processed.
  */
 public interface DeletedBlockLog extends Closeable {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
index 3ada8fe..c23b1fd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
@@ -53,9 +53,9 @@ import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
  * client to able to write to it.
  * <p>
  * 2. Owners - Each instance of Name service, for example, Namenode of HDFS or
- * Key Space Manager (KSM) of Ozone or CBlockServer --  is an owner. It is
- * possible to have many KSMs for a Ozone cluster and only one SCM. But SCM
- * keeps the data from each KSM in separate bucket, never mixing them. To
+ * Ozone Manager (OM) of Ozone or CBlockServer -- is an owner. It is
+ * possible to have many OMs for an Ozone cluster and only one SCM. But SCM
+ * keeps the data from each OM in a separate bucket, never mixing them. To
  * write data, often we have to find all open containers for a specific owner.
  * <p>
  * 3. ReplicationType - The clients are allowed to specify what kind of

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
index edbcfa1..996478c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
@@ -33,7 +33,7 @@ import java.util.concurrent.locks.ReentrantLock;
 /**
  * Command Queue is queue of commands for the datanode.
  * <p>
- * Node manager, container Manager and key space managers can queue commands for
+ * Node manager, container manager and Ozone managers can queue commands for
  * datanodes into this queue. These commands will be sent in the order in which
  * they were queued.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
index 98fe9a1..3bb284e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
@@ -152,7 +152,7 @@ public class SCMBlockProtocolServer implements ScmBlockLocationProtocol {
   @Override
   public List<DeleteBlockGroupResult> deleteKeyBlocks(
       List<BlockGroup> keyBlocksInfoList) throws IOException {
-    LOG.info("SCM is informed by KSM to delete {} blocks", keyBlocksInfoList
+    LOG.info("SCM is informed by OM to delete {} blocks", keyBlocksInfoList
         .size());
     List<DeleteBlockGroupResult> results = new ArrayList<>();
     for (BlockGroup keyBlocks : keyBlocksInfoList) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java
index 727c81a..7828445 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java
@@ -27,7 +27,7 @@ import java.io.IOException;
 import java.net.URISyntaxException;
 
 /**
- * This class is the base CLI for scm, ksm and scmadm.
+ * This class is the base CLI for scm, om and scmadm.
  */
 public abstract class OzoneBaseCLI extends Configured implements Tool {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
index c741588..6d6fea0 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
@@ -39,12 +39,12 @@ Test rest interface
                     Should contain      ${result}       200 OK
 
 Check webui static resources
-    ${result} =			Execute on		scm		curl -s -I http://localhost:9876/static/bootstrap-3.3.7/js/bootstrap.min.js
+    ${result} =			Execute on		scm		            curl -s -I http://localhost:9876/static/bootstrap-3.3.7/js/bootstrap.min.js
 	 Should contain		${result}		200
-    ${result} =			Execute on		ksm		curl -s -I http://localhost:9874/static/bootstrap-3.3.7/js/bootstrap.min.js
+    ${result} =			Execute on		ozoneManager		curl -s -I http://localhost:9874/static/bootstrap-3.3.7/js/bootstrap.min.js
 	 Should contain		${result}		200
 
 Start freon testing
-    ${result} =		Execute on		ksm		ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
+    ${result} =		Execute on		ozoneManager		ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
 	 Wait Until Keyword Succeeds	3min	10sec		Should contain		${result}		Number of Keys added: 125
 	 Should Not Contain		${result}		ERROR

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml
index b50f42d..99f2831 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml
@@ -25,18 +25,18 @@ services:
       command: ["/opt/hadoop/bin/ozone","datanode"]
       env_file:
         - ./docker-config
-   ksm:
+   ozoneManager:
       image: apache/hadoop-runner
-      hostname: ksm
+      hostname: ozoneManager
       volumes:
          - ${OZONEDIR}:/opt/hadoop
       ports:
          - 9874
       environment:
-         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+         ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","ksm"]
+      command: ["/opt/hadoop/bin/ozone","om"]
    scm:
       image: apache/hadoop-runner
       volumes:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config
index c3ec2ef..b72085b 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config
@@ -14,8 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-OZONE-SITE.XML_ozone.ksm.address=ksm
-OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874
+OZONE-SITE.XML_ozone.om.address=ozoneManager
+OZONE-SITE.XML_ozone.om.http-address=ozoneManager:9874
 OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.enabled=True
 OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
index 9521ad6..f4be3e0 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
@@ -28,22 +28,22 @@ ${PROJECTDIR}           ${CURDIR}/../../../../../..
 
 *** Test Cases ***
 RestClient without http port
-   Test ozone shell       http://          ksm              restwoport        True
+   Test ozone shell       http://          ozoneManager          restwoport        True
 
 RestClient with http port
-   Test ozone shell       http://          ksm:9874         restwport         True
+   Test ozone shell       http://          ozoneManager:9874     restwport         True
 
 RestClient without host name
-   Test ozone shell       http://          ${EMPTY}         restwohost        True
+   Test ozone shell       http://          ${EMPTY}              restwohost        True
 
 RpcClient with port
-   Test ozone shell       o3://            ksm:9862         rpcwoport         False
+   Test ozone shell       o3://            ozoneManager:9862     rpcwoport         False
 
 RpcClient without host
-   Test ozone shell       o3://            ${EMPTY}         rpcwport          False
+   Test ozone shell       o3://            ${EMPTY}              rpcwport          False
 
 RpcClient without scheme
-   Test ozone shell       ${EMPTY}         ${EMPTY}         rpcwoscheme       False
+   Test ozone shell       ${EMPTY}         ${EMPTY}              rpcwoscheme       False
 
 
 *** Keywords ***
@@ -52,7 +52,7 @@ Test ozone shell
     ${result} =     Execute on          datanode        ozone oz -createVolume ${protocol}${server}/${volume} -user bilbo -quota 100TB -root
                     Should not contain  ${result}       Failed
                     Should contain      ${result}       Creating Volume: ${volume}
-    ${result} =     Execute on          datanode        ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")'
+    ${result} =     Execute on          datanode        ozone oz -listVolume o3://ozoneManager -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")'
                     Should contain      ${result}       createdOn
                     Execute on          datanode        ozone oz -updateVolume ${protocol}${server}/${volume} -user bill -quota 10TB
     ${result} =     Execute on          datanode        ozone oz -infoVolume ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name'
@@ -66,7 +66,7 @@ Test ozone shell
                     Should Be Equal     ${result}       GROUP
     ${result} =     Execute on          datanode        ozone oz -updateBucket ${protocol}${server}/${volume}/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
                     Should Be Equal     ${result}       USER
-    ${result} =     Execute on          datanode        ozone oz -listBucket o3://ksm/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+    ${result} =     Execute on          datanode        ozone oz -listBucket o3://ozoneManager/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
                     Should Be Equal     ${result}       ${volume}
                     Run Keyword and Return If           ${withkeytest}        Test key handling       ${protocol}       ${server}       ${volume}
                     Execute on          datanode        ozone oz -deleteBucket ${protocol}${server}/${volume}/bb1
@@ -80,6 +80,6 @@ Test key handling
                     Execute on          datanode        ls -l NOTICE.txt.1
     ${result} =     Execute on          datanode        ozone oz -infoKey ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
                     Should contain      ${result}       createdOn
-    ${result} =     Execute on          datanode        ozone oz -listKey o3://ksm/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
+    ${result} =     Execute on          datanode        ozone oz -listKey o3://ozoneManager/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
                     Should Be Equal     ${result}       key1
                     Execute on          datanode        ozone oz -deleteKey ${protocol}${server}/${volume}/bb1/key1 -v

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot
index a5ea30a..9235cd9 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot
@@ -21,12 +21,12 @@ Startup Ozone cluster with size
                                              Run                         echo "Starting new docker-compose environment" >> docker-compose.log
     ${rc}        ${output} =                 Run docker compose          up -d
     Should Be Equal As Integers             ${rc}                       0
-    Wait Until Keyword Succeeds             1min    5sec    Is Daemon started   ksm     HTTP server of KSM is listening
+    Wait Until Keyword Succeeds             1min    5sec    Is Daemon started   ozoneManager     HTTP server of OZONEMANAGER is listening
     Daemons are running without error
     Scale datanodes up                      5
 
 Daemons are running without error
-    Is daemon running without error           ksm
+    Is daemon running without error           ozoneManager
     Is daemon running without error           scm
     Is daemon running without error           datanode
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
index 12022df..6b7b7bd 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
@@ -25,18 +25,18 @@ services:
       command: ["/opt/hadoop/bin/ozone","datanode"]
       env_file:
         - ./docker-config
-   ksm:
+   ozoneManager:
       image: apache/hadoop-runner
-      hostname: ksm
+      hostname: ozoneManager
       volumes:
          - ${OZONEDIR}:/opt/hadoop
       ports:
          - 9874
       environment:
-         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+         ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","ksm"]
+      command: ["/opt/hadoop/bin/ozone","om"]
    scm:
       image: apache/hadoop-runner
       volumes:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
index e06d434..b0129bc 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
@@ -15,8 +15,8 @@
 # limitations under the License.
 
 CORE-SITE.XML_fs.o3.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
-OZONE-SITE.XML_ozone.ksm.address=ksm
-OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874
+OZONE-SITE.XML_ozone.om.address=ozoneManager
+OZONE-SITE.XML_ozone.om.http-address=ozoneManager:9874
 OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.enabled=True
 OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
index 9e8a5d2..ea473c0 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
@@ -27,13 +27,13 @@ ${PROJECTDIR}           ${CURDIR}/../../../../../..
 
 *** Test Cases ***
 Create volume and bucket
-    Execute on          datanode        ozone oz -createVolume http://ksm/fstest -user bilbo -quota 100TB -root
-    Execute on          datanode        ozone oz -createBucket http://ksm/fstest/bucket1
+    Execute on          datanode        ozone oz -createVolume http://ozoneManager/fstest -user bilbo -quota 100TB -root
+    Execute on          datanode        ozone oz -createBucket http://ozoneManager/fstest/bucket1
 
 Check volume from ozonefs
     ${result} =         Execute on          hadooplast        hdfs dfs -ls o3://bucket1.fstest/
 
 Create directory from ozonefs
                         Execute on          hadooplast        hdfs dfs -mkdir -p o3://bucket1.fstest/testdir/deep
-    ${result} =         Execute on          ksm               ozone oz -listKey o3://ksm/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+    ${result} =         Execute on          ozoneManager      ozone oz -listKey o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
                                             Should contain    ${result}         testdir/deep

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
index 39b7bb8..0da52dc 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
@@ -81,7 +81,7 @@ public final class BucketArgs {
   }
 
   /**
-   * Returns new builder class that builds a KsmBucketInfo.
+   * Returns a new builder class that builds an OmBucketInfo.
    *
    * @return Builder
    */
@@ -90,7 +90,7 @@ public final class BucketArgs {
   }
 
   /**
-   * Builder for KsmBucketInfo.
+   * Builder for OmBucketInfo.
    */
   public static class Builder {
     private Boolean versioning;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
index 3085b0d..de0d166 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.client;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.KsmUtils;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rest.RestClient;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
@@ -34,11 +34,9 @@ import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Proxy;
 
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CLIENT_PROTOCOL;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_PROTOCOL;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY;
 
 /**
  * Factory class to create different types of OzoneClients.
@@ -97,46 +95,46 @@ public final class OzoneClientFactory {
   /**
    * Returns an OzoneClient which will use RPC protocol.
    *
-   * @param ksmHost
-   *        hostname of KeySpaceManager to connect.
+   * @param omHost
+   *        hostname of OzoneManager to connect.
    *
    * @return OzoneClient
    *
    * @throws IOException
    */
-  public static OzoneClient getRpcClient(String ksmHost)
+  public static OzoneClient getRpcClient(String omHost)
       throws IOException {
     Configuration config = new OzoneConfiguration();
-    int port = KsmUtils.getKsmRpcPort(config);
-    return getRpcClient(ksmHost, port, config);
+    int port = OmUtils.getOmRpcPort(config);
+    return getRpcClient(omHost, port, config);
   }
 
   /**
    * Returns an OzoneClient which will use RPC protocol.
    *
-   * @param ksmHost
-   *        hostname of KeySpaceManager to connect.
+   * @param omHost
+   *        hostname of OzoneManager to connect.
    *
-   * @param ksmRpcPort
-   *        RPC port of KeySpaceManager.
+   * @param omRpcPort
+   *        RPC port of OzoneManager.
    *
    * @return OzoneClient
    *
    * @throws IOException
    */
-  public static OzoneClient getRpcClient(String ksmHost, Integer ksmRpcPort)
+  public static OzoneClient getRpcClient(String omHost, Integer omRpcPort)
       throws IOException {
-    return getRpcClient(ksmHost, ksmRpcPort, new OzoneConfiguration());
+    return getRpcClient(omHost, omRpcPort, new OzoneConfiguration());
   }
 
   /**
    * Returns an OzoneClient which will use RPC protocol.
    *
-   * @param ksmHost
-   *        hostname of KeySpaceManager to connect.
+   * @param omHost
+   *        hostname of OzoneManager to connect.
    *
-   * @param ksmRpcPort
-   *        RPC port of KeySpaceManager.
+   * @param omRpcPort
+   *        RPC port of OzoneManager.
    *
    * @param config
    *        Configuration to be used for OzoneClient creation
@@ -145,13 +143,13 @@ public final class OzoneClientFactory {
    *
    * @throws IOException
    */
-  public static OzoneClient getRpcClient(String ksmHost, Integer ksmRpcPort,
+  public static OzoneClient getRpcClient(String omHost, Integer omRpcPort,
                                          Configuration config)
       throws IOException {
-    Preconditions.checkNotNull(ksmHost);
-    Preconditions.checkNotNull(ksmRpcPort);
+    Preconditions.checkNotNull(omHost);
+    Preconditions.checkNotNull(omRpcPort);
     Preconditions.checkNotNull(config);
-    config.set(OZONE_KSM_ADDRESS_KEY, ksmHost + ":" + ksmRpcPort);
+    config.set(OZONE_OM_ADDRESS_KEY, omHost + ":" + omRpcPort);
     return getRpcClient(config);
   }
 
@@ -175,46 +173,46 @@ public final class OzoneClientFactory {
   /**
    * Returns an OzoneClient which will use REST protocol.
    *
-   * @param ksmHost
-   *        hostname of KeySpaceManager to connect.
+   * @param omHost
+   *        hostname of OzoneManager to connect.
    *
    * @return OzoneClient
    *
    * @throws IOException
    */
-  public static OzoneClient getRestClient(String ksmHost)
+  public static OzoneClient getRestClient(String omHost)
       throws IOException {
     Configuration config = new OzoneConfiguration();
-    int port = KsmUtils.getKsmRestPort(config);
-    return getRestClient(ksmHost, port, config);
+    int port = OmUtils.getOmRestPort(config);
+    return getRestClient(omHost, port, config);
   }
 
   /**
    * Returns an OzoneClient which will use REST protocol.
    *
-   * @param ksmHost
-   *        hostname of KeySpaceManager to connect.
+   * @param omHost
+   *        hostname of OzoneManager to connect.
    *
-   * @param ksmHttpPort
-   *        HTTP port of KeySpaceManager.
+   * @param omHttpPort
+   *        HTTP port of OzoneManager.
    *
    * @return OzoneClient
    *
    * @throws IOException
    */
-  public static OzoneClient getRestClient(String ksmHost, Integer ksmHttpPort)
+  public static OzoneClient getRestClient(String omHost, Integer omHttpPort)
       throws IOException {
-    return getRestClient(ksmHost, ksmHttpPort, new OzoneConfiguration());
+    return getRestClient(omHost, omHttpPort, new OzoneConfiguration());
   }
 
   /**
    * Returns an OzoneClient which will use REST protocol.
    *
-   * @param ksmHost
-   *        hostname of KeySpaceManager to connect.
+   * @param omHost
+   *        hostname of OzoneManager to connect.
    *
-   * @param ksmHttpPort
-   *        HTTP port of KeySpaceManager.
+   * @param omHttpPort
+   *        HTTP port of OzoneManager.
    *
    * @param config
    *        Configuration to be used for OzoneClient creation
@@ -223,13 +221,13 @@ public final class OzoneClientFactory {
    *
    * @throws IOException
    */
-  public static OzoneClient getRestClient(String ksmHost, Integer ksmHttpPort,
+  public static OzoneClient getRestClient(String omHost, Integer omHttpPort,
                                           Configuration config)
       throws IOException {
-    Preconditions.checkNotNull(ksmHost);
-    Preconditions.checkNotNull(ksmHttpPort);
+    Preconditions.checkNotNull(omHost);
+    Preconditions.checkNotNull(omHttpPort);
     Preconditions.checkNotNull(config);
-    config.set(OZONE_KSM_HTTP_ADDRESS_KEY, ksmHost + ":" +  ksmHttpPort);
+    config.set(OZONE_OM_HTTP_ADDRESS_KEY, omHost + ":" + omHttpPort);
     return getRestClient(config);
   }
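
For reference, a minimal sketch of the renamed factory methods above (the host name and ports are illustrative; 9862 and 9874 are the OM RPC and HTTP ports used in the acceptance tests later in this patch, and OzoneClient is assumed to be Closeable):

import java.io.IOException;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

public class OmClientExample {
  public static void main(String[] args) throws IOException {
    // RPC client against the renamed Ozone Manager endpoint (formerly getRpcClient(ksmHost, ...)).
    OzoneClient rpcClient =
        OzoneClientFactory.getRpcClient("ozoneManager", 9862, new OzoneConfiguration());

    // REST client variant; uses the OM HTTP address instead of the RPC address.
    OzoneClient restClient = OzoneClientFactory.getRestClient("ozoneManager", 9874);

    // ... create volumes, buckets and keys through either client ...

    rpcClient.close();
    restClient.close();
  }
}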
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
index 0c723dd..7c93146 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
@@ -49,7 +49,7 @@ public class OzoneKey {
   private long modificationTime;
 
   /**
-   * Constructs OzoneKey from KsmKeyInfo.
+   * Constructs OzoneKey from OmKeyInfo.
    *
    */
   public OzoneKey(String volumeName, String bucketName,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java
index f1aa031..ae1cfcc 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java
@@ -77,7 +77,7 @@ public final class VolumeArgs {
     return acls;
   }
   /**
-   * Returns new builder class that builds a KsmVolumeArgs.
+   * Returns a new builder class that builds an OmVolumeArgs.
    *
    * @return Builder
    */
@@ -86,7 +86,7 @@ public final class VolumeArgs {
   }
 
   /**
-   * Builder for KsmVolumeArgs.
+   * Builder for OmVolumeArgs.
    */
   public static class Builder {
     private String adminName;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index edd85aa..b3a566e 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -23,8 +23,8 @@ import org.apache.hadoop.fs.Seekable;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
@@ -255,28 +255,29 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
     }
   }
 
-  public static LengthInputStream getFromKsmKeyInfo(KsmKeyInfo keyInfo,
+  public static LengthInputStream getFromOmKeyInfo(
+      OmKeyInfo keyInfo,
       XceiverClientManager xceiverClientManager,
       StorageContainerLocationProtocolClientSideTranslatorPB
-          storageContainerLocationClient, String requestId)
-      throws IOException {
+          storageContainerLocationClient,
+      String requestId) throws IOException {
     long length = 0;
     long containerKey;
     ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream();
     groupInputStream.key = keyInfo.getKeyName();
-    List<KsmKeyLocationInfo> keyLocationInfos =
+    List<OmKeyLocationInfo> keyLocationInfos =
         keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
     groupInputStream.streamOffset = new long[keyLocationInfos.size()];
     for (int i = 0; i < keyLocationInfos.size(); i++) {
-      KsmKeyLocationInfo ksmKeyLocationInfo = keyLocationInfos.get(i);
-      BlockID blockID = ksmKeyLocationInfo.getBlockID();
+      OmKeyLocationInfo omKeyLocationInfo = keyLocationInfos.get(i);
+      BlockID blockID = omKeyLocationInfo.getBlockID();
       long containerID = blockID.getContainerID();
       ContainerWithPipeline containerWithPipeline =
           storageContainerLocationClient.getContainerWithPipeline(containerID);
       XceiverClientSpi xceiverClient = xceiverClientManager
           .acquireClient(containerWithPipeline.getPipeline(), containerID);
       boolean success = false;
-      containerKey = ksmKeyLocationInfo.getLocalID();
+      containerKey = omKeyLocationInfo.getLocalID();
       try {
         LOG.debug("get key accessing {} {}",
             containerID, containerKey);
@@ -292,11 +293,10 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
         }
         success = true;
         ChunkInputStream inputStream = new ChunkInputStream(
-            ksmKeyLocationInfo.getBlockID(), xceiverClientManager,
-            xceiverClient,
+            omKeyLocationInfo.getBlockID(), xceiverClientManager, xceiverClient,
             chunks, requestId);
         groupInputStream.addStream(inputStream,
-            ksmKeyLocationInfo.getLength());
+            omKeyLocationInfo.getLength());
       } finally {
         if (!success) {
           xceiverClientManager.releaseClient(xceiverClient);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index d1a3b46..9443317 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -24,15 +24,15 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers
@@ -67,10 +67,10 @@ public class ChunkGroupOutputStream extends OutputStream {
   private final ArrayList<ChunkOutputStreamEntry> streamEntries;
   private int currentStreamIndex;
   private long byteOffset;
-  private final KeySpaceManagerProtocolClientSideTranslatorPB ksmClient;
+  private final OzoneManagerProtocolClientSideTranslatorPB omClient;
   private final
       StorageContainerLocationProtocolClientSideTranslatorPB scmClient;
-  private final KsmKeyArgs keyArgs;
+  private final OmKeyArgs keyArgs;
   private final int openID;
   private final XceiverClientManager xceiverClientManager;
   private final int chunkSize;
@@ -83,7 +83,7 @@ public class ChunkGroupOutputStream extends OutputStream {
   @VisibleForTesting
   public ChunkGroupOutputStream() {
     streamEntries = new ArrayList<>();
-    ksmClient = null;
+    omClient = null;
     scmClient = null;
     keyArgs = null;
     openID = -1;
@@ -113,16 +113,16 @@ public class ChunkGroupOutputStream extends OutputStream {
   public ChunkGroupOutputStream(
       OpenKeySession handler, XceiverClientManager xceiverClientManager,
       StorageContainerLocationProtocolClientSideTranslatorPB scmClient,
-      KeySpaceManagerProtocolClientSideTranslatorPB ksmClient,
+      OzoneManagerProtocolClientSideTranslatorPB omClient,
       int chunkSize, String requestId, ReplicationFactor factor,
       ReplicationType type) throws IOException {
     this.streamEntries = new ArrayList<>();
     this.currentStreamIndex = 0;
     this.byteOffset = 0;
-    this.ksmClient = ksmClient;
+    this.omClient = omClient;
     this.scmClient = scmClient;
-    KsmKeyInfo info = handler.getKeyInfo();
-    this.keyArgs = new KsmKeyArgs.Builder()
+    OmKeyInfo info = handler.getKeyInfo();
+    this.keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(info.getVolumeName())
         .setBucketName(info.getBucketName())
         .setKeyName(info.getKeyName())
@@ -150,19 +150,19 @@ public class ChunkGroupOutputStream extends OutputStream {
    * @param openVersion the version corresponding to the pre-allocation.
    * @throws IOException
    */
-  public void addPreallocateBlocks(KsmKeyLocationInfoGroup version,
+  public void addPreallocateBlocks(OmKeyLocationInfoGroup version,
       long openVersion) throws IOException {
     // server may return any number of blocks, (0 to any)
     // only the blocks allocated in this open session (block createVersion
     // equals to open session version)
-    for (KsmKeyLocationInfo subKeyInfo : version.getLocationList()) {
+    for (OmKeyLocationInfo subKeyInfo : version.getLocationList()) {
       if (subKeyInfo.getCreateVersion() == openVersion) {
         checkKeyLocationInfo(subKeyInfo);
       }
     }
   }
 
-  private void checkKeyLocationInfo(KsmKeyLocationInfo subKeyInfo)
+  private void checkKeyLocationInfo(OmKeyLocationInfo subKeyInfo)
       throws IOException {
     ContainerWithPipeline containerWithPipeline = scmClient
         .getContainerWithPipeline(subKeyInfo.getContainerID());
@@ -210,7 +210,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     checkNotClosed();
 
     if (streamEntries.size() <= currentStreamIndex) {
-      Preconditions.checkNotNull(ksmClient);
+      Preconditions.checkNotNull(omClient);
       // allocate a new block, if an exception happens, log an error and
       // throw exception to the caller directly, and the write fails.
       try {
@@ -258,7 +258,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     int succeededAllocates = 0;
     while (len > 0) {
       if (streamEntries.size() <= currentStreamIndex) {
-        Preconditions.checkNotNull(ksmClient);
+        Preconditions.checkNotNull(omClient);
         // allocate a new block, if a exception happens, log an error and
         // throw exception to the caller directly, and the write fails.
         try {
@@ -286,7 +286,7 @@ public class ChunkGroupOutputStream extends OutputStream {
   }
 
   /**
-   * Contact KSM to get a new block. Set the new block with the index (e.g.
+   * Contact OM to get a new block. Set the new block with the index (e.g.
    * first block has index = 0, second has index = 1 etc.)
    *
    * The returned block is made to new ChunkOutputStreamEntry to write.
@@ -295,7 +295,7 @@ public class ChunkGroupOutputStream extends OutputStream {
    * @throws IOException
    */
   private void allocateNewBlock(int index) throws IOException {
-    KsmKeyLocationInfo subKeyInfo = ksmClient.allocateBlock(keyArgs, openID);
+    OmKeyLocationInfo subKeyInfo = omClient.allocateBlock(keyArgs, openID);
     checkKeyLocationInfo(subKeyInfo);
   }
 
@@ -311,7 +311,7 @@ public class ChunkGroupOutputStream extends OutputStream {
   }
 
   /**
-   * Commit the key to KSM, this will add the blocks as the new key blocks.
+   * Commit the key to OM; this will add the blocks as the new key blocks.
    *
    * @throws IOException
    */
@@ -329,7 +329,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     if (keyArgs != null) {
       // in test, this could be null
       keyArgs.setDataSize(byteOffset);
-      ksmClient.commitKey(keyArgs, openID);
+      omClient.commitKey(keyArgs, openID);
     } else {
       LOG.warn("Closing ChunkGroupOutputStream, but key args is null");
     }
@@ -342,7 +342,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     private OpenKeySession openHandler;
     private XceiverClientManager xceiverManager;
     private StorageContainerLocationProtocolClientSideTranslatorPB scmClient;
-    private KeySpaceManagerProtocolClientSideTranslatorPB ksmClient;
+    private OzoneManagerProtocolClientSideTranslatorPB omClient;
     private int chunkSize;
     private String requestID;
     private ReplicationType type;
@@ -364,9 +364,9 @@ public class ChunkGroupOutputStream extends OutputStream {
       return this;
     }
 
-    public Builder setKsmClient(
-        KeySpaceManagerProtocolClientSideTranslatorPB client) {
-      this.ksmClient = client;
+    public Builder setOmClient(
+        OzoneManagerProtocolClientSideTranslatorPB client) {
+      this.omClient = client;
       return this;
     }
 
@@ -392,7 +392,7 @@ public class ChunkGroupOutputStream extends OutputStream {
 
     public ChunkGroupOutputStream build() throws IOException {
       return new ChunkGroupOutputStream(openHandler, xceiverManager, scmClient,
-          ksmClient, chunkSize, requestID, factor, type);
+          omClient, chunkSize, requestID, factor, type);
     }
   }
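
The net effect of the rename on the write path can be sketched as below; the openId parameter and the build() call on OmKeyArgs.Builder are assumptions made to keep the example self-contained, everything else mirrors the calls shown in the diff above:

import java.io.IOException;

import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;

public final class OmWritePathSketch {

  private OmWritePathSketch() {
  }

  static void writeAndCommit(OzoneManagerProtocolClientSideTranslatorPB omClient,
      OpenKeySession handler, int openId, long bytesWritten) throws IOException {
    // Build the key arguments from the open-key session, as the stream constructor does.
    OmKeyInfo info = handler.getKeyInfo();
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(info.getVolumeName())
        .setBucketName(info.getBucketName())
        .setKeyName(info.getKeyName())
        .build();

    // Ask OM for a new block for this open key (formerly ksmClient.allocateBlock).
    OmKeyLocationInfo block = omClient.allocateBlock(keyArgs, openId);

    // ... write bytesWritten bytes to the returned block through the datanode pipeline ...

    // Commit the key to OM so that the written blocks become the key's blocks.
    keyArgs.setDataSize(bytesWritten);
    omClient.commitKey(keyArgs, openId);
  }
}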
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java
index 93b3417..abdc2fb 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.ozone.client.rest;
 
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
 
 import java.util.List;
 import java.util.Random;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
index 6e3f617..78fbe8d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
@@ -45,10 +45,9 @@ import org.apache.hadoop.ozone.client.rest.headers.Header;
 import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
 import org.apache.hadoop.ozone.client.rest.response.KeyInfo;
 import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServicePort;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;
 import org.apache.hadoop.ozone.web.response.ListBuckets;
 import org.apache.hadoop.ozone.web.response.ListKeys;
 import org.apache.hadoop.ozone.web.response.ListVolumes;
@@ -152,8 +151,8 @@ public class RestClient implements ClientProtocol {
                   .build())
           .build();
       this.ugi = UserGroupInformation.getCurrentUser();
-      this.userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS,
-          KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT);
+      this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS,
+          OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT);
 
       // TODO: Add new configuration parameter to configure RestServerSelector.
       RestServerSelector defaultSelector = new DefaultRestServerSelector();
@@ -171,11 +170,11 @@ public class RestClient implements ClientProtocol {
 
   private InetSocketAddress getOzoneRestServerAddress(
       RestServerSelector selector) throws IOException {
-    String httpAddress = conf.get(KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY);
+    String httpAddress = conf.get(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY);
 
     if (httpAddress == null) {
       throw new IllegalArgumentException(
-          KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY + " must be defined. See" +
+          OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY + " must be defined. See" +
               " https://wiki.apache.org/hadoop/Ozone#Configuration for" +
               " details on configuring Ozone.");
     }
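
As the check above shows, the REST client now fails fast when the renamed HTTP address key is missing. A small sketch of providing it explicitly (the host name is illustrative, and the Configuration-only getRestClient overload called above is assumed to be public):

import java.io.IOException;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY;

public class RestClientConfigExample {
  public static void main(String[] args) throws IOException {
    OzoneConfiguration conf = new OzoneConfiguration();

    // Without this key the RestClient constructor throws IllegalArgumentException.
    conf.set(OZONE_OM_HTTP_ADDRESS_KEY, "ozoneManager:9874");

    OzoneClient client = OzoneClientFactory.getRestClient(conf);
    // ... use the client, then close it ...
    client.close();
  }
}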




[27/50] [abbrv] hadoop git commit: HDFS-13121. NPE when request file descriptors when SC read. Contributed by Zsolt Venczel.

Posted by bh...@apache.org.
HDFS-13121. NPE when request file descriptors when SC read. Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0247cb63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0247cb63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0247cb63

Branch: refs/heads/HDDS-48
Commit: 0247cb6318507afe06816e337a19f396afc53efa
Parents: 061b168
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Fri Jul 6 14:59:49 2018 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Fri Jul 6 14:59:49 2018 -0700

----------------------------------------------------------------------
 .../hdfs/client/impl/BlockReaderFactory.java    |  5 ++
 .../shortcircuit/TestShortCircuitCache.java     | 89 ++++++++++++++++++++
 2 files changed, 94 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0247cb63/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
index 1003b95..ce43185 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
@@ -598,6 +598,11 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       sock.recvFileInputStreams(fis, buf, 0, buf.length);
       ShortCircuitReplica replica = null;
       try {
+        if (fis[0] == null || fis[1] == null) {
+          throw new IOException("the datanode " + datanode + " failed to " +
+              "pass a file descriptor (might have reached open file limit).");
+        }
+
         ExtendedBlockId key =
             new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId());
         if (buf[0] == USE_RECEIPT_VERIFICATION.getNumber()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0247cb63/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index 4e2cede..ac29c3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -42,6 +42,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.ClientContext;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.PeerCache;
 import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory;
 import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
 import org.apache.hadoop.hdfs.DFSInputStream;
@@ -50,10 +54,12 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.net.DomainPeer;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
@@ -66,9 +72,11 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RetriableException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
@@ -819,4 +827,85 @@ public class TestShortCircuitCache {
         .fetch(Mockito.eq(extendedBlockId), Mockito.any());
     }
   }
+
+  @Test
+  public void testRequestFileDescriptorsWhenULimit() throws Exception {
+    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
+    Configuration conf = createShortCircuitConf(
+        "testRequestFileDescriptorsWhenULimit", sockDir);
+
+    final short replicas = 1;
+    final int fileSize = 3;
+    final String testFile = "/testfile";
+
+    try (MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(replicas).build()) {
+
+      cluster.waitActive();
+
+      DistributedFileSystem fs = cluster.getFileSystem();
+      DFSTestUtil.createFile(fs, new Path(testFile), fileSize, replicas, 0L);
+
+      LocatedBlock blk = new DFSClient(DFSUtilClient.getNNAddress(conf), conf)
+          .getLocatedBlocks(testFile, 0, fileSize).get(0);
+
+      ClientContext clientContext = Mockito.mock(ClientContext.class);
+      Mockito.when(clientContext.getPeerCache()).thenAnswer(
+          (Answer<PeerCache>) peerCacheCall -> {
+            PeerCache peerCache = new PeerCache(10, Long.MAX_VALUE);
+            DomainPeer peer = Mockito.spy(getDomainPeerToDn(conf));
+            peerCache.put(blk.getLocations()[0], peer);
+
+            Mockito.when(peer.getDomainSocket()).thenAnswer(
+                (Answer<DomainSocket>) domainSocketCall -> {
+                  DomainSocket domainSocket = Mockito.mock(DomainSocket.class);
+                  Mockito.when(domainSocket
+                      .recvFileInputStreams(
+                          Mockito.any(FileInputStream[].class),
+                          Mockito.any(byte[].class),
+                          Mockito.anyInt(),
+                          Mockito.anyInt())
+                  ).thenAnswer(
+                      // we are mocking the FileInputStream array with nulls
+                      (Answer<Void>) recvFileInputStreamsCall -> null
+                  );
+                  return domainSocket;
+                }
+            );
+
+            return peerCache;
+          });
+
+      Mockito.when(clientContext.getShortCircuitCache()).thenAnswer(
+          (Answer<ShortCircuitCache>) shortCircuitCacheCall -> {
+            ShortCircuitCache cache = Mockito.mock(ShortCircuitCache.class);
+            Mockito.when(cache.allocShmSlot(
+                Mockito.any(DatanodeInfo.class),
+                Mockito.any(DomainPeer.class),
+                Mockito.any(MutableBoolean.class),
+                Mockito.any(ExtendedBlockId.class),
+                Mockito.anyString()))
+                .thenAnswer((Answer<Slot>) call -> null);
+
+            return cache;
+          }
+      );
+
+      DatanodeInfo[] nodes = blk.getLocations();
+
+      try {
+        Assert.assertNull(new BlockReaderFactory(new DfsClientConf(conf))
+            .setInetSocketAddress(NetUtils.createSocketAddr(nodes[0]
+                .getXferAddr()))
+            .setClientCacheContext(clientContext)
+            .setDatanodeInfo(blk.getLocations()[0])
+            .setBlock(blk.getBlock())
+            .setBlockToken(new Token())
+            .createShortCircuitReplicaInfo());
+      } catch (NullPointerException ex) {
+        Assert.fail("Should not throw NPE when the native library is unable " +
+            "to create new files!");
+      }
+    }
+  }
 }


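The heart of testRequestFileDescriptorsWhenULimit above is a pair of nested Mockito Answer stubs: the mocked ClientContext hands out a PeerCache whose spied DomainPeer returns a DomainSocket mock, and that mock's recvFileInputStreams() yields nothing, so BlockReaderFactory#createShortCircuitReplicaInfo() must return null rather than throw an NPE. A minimal, self-contained sketch of that nested-stubbing pattern follows; FakeSocket, FakePeer and NestedStubSketch are hypothetical stand-ins invented for illustration (not HDFS classes), and only Mockito calls already used in the test above are assumed.

import java.io.FileInputStream;
import org.mockito.Mockito;
import org.mockito.stubbing.Answer;

/** Hypothetical stand-in for DomainSocket; not an HDFS class. */
interface FakeSocket {
  FileInputStream[] recvStreams(byte[] buf, int off, int len);
}

/** Hypothetical stand-in for DomainPeer; not an HDFS class. */
interface FakePeer {
  FakeSocket getSocket();
}

public final class NestedStubSketch {

  /** Builds a peer whose socket never yields file descriptors. */
  static FakePeer peerWithExhaustedDescriptors() {
    FakePeer peer = Mockito.mock(FakePeer.class);
    Mockito.when(peer.getSocket()).thenAnswer((Answer<FakeSocket>) call -> {
      FakeSocket socket = Mockito.mock(FakeSocket.class);
      // Returning null simulates the native layer failing to hand back file
      // descriptors, e.g. because the process has hit its open-file ulimit.
      Mockito.when(socket.recvStreams(
          Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()))
          .thenAnswer((Answer<FileInputStream[]>) recv -> null);
      return socket;
    });
    return peer;
  }

  public static void main(String[] args) {
    FakeSocket socket = peerWithExhaustedDescriptors().getSocket();
    // Callers are expected to cope with the nulls instead of throwing NPE,
    // which is what the test above asserts for createShortCircuitReplicaInfo().
    System.out.println(socket.recvStreams(new byte[8], 0, 8) == null); // true
  }
}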

[19/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
new file mode 100644
index 0000000..7c8595c
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
@@ -0,0 +1,1349 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.client.rest.OzoneException;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.hdds.scm.server.SCMStorage;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ServicePort;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
+import org.apache.hadoop.ozone.web.handlers.KeyArgs;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.web.request.OzoneQuota;
+import org.apache.hadoop.ozone.web.response.BucketInfo;
+import org.apache.hadoop.ozone.web.response.KeyInfo;
+import org.apache.hadoop.ozone.web.response.VolumeInfo;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.Status;
+import org.apache.hadoop.ozone.web.handlers.ListArgs;
+import org.apache.hadoop.ozone.web.response.ListBuckets;
+import org.apache.hadoop.ozone.web.response.ListKeys;
+import org.apache.hadoop.ozone.web.response.ListVolumes;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.utils.BackgroundService;
+import org.apache.hadoop.utils.MetadataKeyFilters;
+import org.apache.hadoop.utils.MetadataStore;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.net.InetSocketAddress;
+import java.text.ParseException;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.List;
+import java.util.UUID;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
+import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_CLIENT_ADDRESS_KEY;
+
+/**
+ * Test Ozone Manager operation in distributed handler scenario.
+ */
+public class TestOzoneManager {
+  private static MiniOzoneCluster cluster = null;
+  private static StorageHandler storageHandler;
+  private static UserArgs userArgs;
+  private static OMMetrics omMetrics;
+  private static OzoneConfiguration conf;
+  private static String clusterId;
+  private static String scmId;
+  private static String omId;
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    omId = UUID.randomUUID().toString();
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+    conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
+    cluster =  MiniOzoneCluster.newBuilder(conf)
+        .setClusterId(clusterId)
+        .setScmId(scmId)
+        .setOmId(omId)
+        .build();
+    cluster.waitForClusterToBeReady();
+    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
+    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+        null, null, null, null);
+    omMetrics = cluster.getOzoneManager().getMetrics();
+  }
+
+  /**
+   * Shutdown the MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  // Create a volume and verify its attributes after creation.
+  @Test(timeout = 60000)
+  public void testCreateVolume() throws IOException, OzoneException {
+    long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails();
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    VolumeInfo retVolumeinfo = storageHandler.getVolumeInfo(getVolumeArgs);
+    Assert.assertTrue(retVolumeinfo.getVolumeName().equals(volumeName));
+    Assert.assertTrue(retVolumeinfo.getOwner().getName().equals(userName));
+    Assert.assertEquals(volumeCreateFailCount,
+        omMetrics.getNumVolumeCreateFails());
+  }
+
+  // Create a volume, modify the volume owner, and then verify its attributes.
+  @Test(timeout = 60000)
+  public void testChangeVolumeOwner() throws IOException, OzoneException {
+    long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails();
+    long volumeInfoFailCount = omMetrics.getNumVolumeInfoFails();
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    String newUserName = "user" + RandomStringUtils.randomNumeric(5);
+    createVolumeArgs.setUserName(newUserName);
+    storageHandler.setVolumeOwner(createVolumeArgs);
+
+    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
+
+    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
+    Assert.assertFalse(retVolumeInfo.getOwner().getName().equals(userName));
+    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(newUserName));
+    Assert.assertEquals(volumeCreateFailCount,
+        omMetrics.getNumVolumeCreateFails());
+    Assert.assertEquals(volumeInfoFailCount,
+        omMetrics.getNumVolumeInfoFails());
+  }
+
+  // Create a volume, modify the volume quota, and then verify its attributes.
+  @Test(timeout = 60000)
+  public void testChangeVolumeQuota() throws IOException, OzoneException {
+    long numVolumeCreateFail = omMetrics.getNumVolumeCreateFails();
+    long numVolumeInfoFail = omMetrics.getNumVolumeInfoFails();
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    Random rand = new Random();
+
+    // Create a new volume with a quota
+    OzoneQuota createQuota =
+        new OzoneQuota(rand.nextInt(100), OzoneQuota.Units.GB);
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    createVolumeArgs.setQuota(createQuota);
+    storageHandler.createVolume(createVolumeArgs);
+
+    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
+    Assert.assertEquals(createQuota.sizeInBytes(),
+        retVolumeInfo.getQuota().sizeInBytes());
+
+    // Set a new quota and test it
+    OzoneQuota setQuota =
+        new OzoneQuota(rand.nextInt(100), OzoneQuota.Units.GB);
+    createVolumeArgs.setQuota(setQuota);
+    storageHandler.setVolumeQuota(createVolumeArgs, false);
+    getVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
+    Assert.assertEquals(setQuota.sizeInBytes(),
+        retVolumeInfo.getQuota().sizeInBytes());
+
+    // Remove the quota and test it again
+    storageHandler.setVolumeQuota(createVolumeArgs, true);
+    getVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
+    Assert.assertEquals(OzoneConsts.MAX_QUOTA_IN_BYTES,
+        retVolumeInfo.getQuota().sizeInBytes());
+    Assert.assertEquals(numVolumeCreateFail,
+        omMetrics.getNumVolumeCreateFails());
+    Assert.assertEquals(numVolumeInfoFail,
+        omMetrics.getNumVolumeInfoFails());
+  }
+
+  // Create a volume, delete it, and then verify the deletion.
+  @Test(timeout = 60000)
+  public void testDeleteVolume() throws IOException, OzoneException {
+    long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails();
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String volumeName1 = volumeName + "_A";
+    String volumeName2 = volumeName + "_AA";
+    VolumeArgs volumeArgs = null;
+    VolumeInfo volumeInfo = null;
+
+    // Create 2 empty volumes with same prefix.
+    volumeArgs = new VolumeArgs(volumeName1, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(volumeArgs);
+
+    volumeArgs = new VolumeArgs(volumeName2, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(volumeArgs);
+
+    volumeArgs  = new VolumeArgs(volumeName1, userArgs);
+    volumeInfo = storageHandler.getVolumeInfo(volumeArgs);
+    Assert.assertTrue(volumeInfo.getVolumeName().equals(volumeName1));
+    Assert.assertTrue(volumeInfo.getOwner().getName().equals(userName));
+    Assert.assertEquals(volumeCreateFailCount,
+        omMetrics.getNumVolumeCreateFails());
+
+    // Volume with _A should be able to delete as it is empty.
+    storageHandler.deleteVolume(volumeArgs);
+
+    // Make sure volume with _AA suffix still exists.
+    volumeArgs = new VolumeArgs(volumeName2, userArgs);
+    volumeInfo = storageHandler.getVolumeInfo(volumeArgs);
+    Assert.assertTrue(volumeInfo.getVolumeName().equals(volumeName2));
+
+    // Make sure volume with _A suffix is successfully deleted.
+    exception.expect(IOException.class);
+    exception.expectMessage("Info Volume failed, error:VOLUME_NOT_FOUND");
+    volumeArgs = new VolumeArgs(volumeName1, userArgs);
+    storageHandler.getVolumeInfo(volumeArgs);
+  }
+
+  // Create a volume and a bucket inside the volume, then try to delete
+  // the volume and verify that the deletion fails because it is not empty.
+  @Test(timeout = 60000)
+  public void testFailedDeleteVolume() throws IOException, OzoneException {
+    long numVolumeCreateFails = omMetrics.getNumVolumeCreateFails();
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
+    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
+    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(userName));
+    Assert.assertEquals(numVolumeCreateFails,
+        omMetrics.getNumVolumeCreateFails());
+
+    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    storageHandler.createBucket(bucketArgs);
+
+    try {
+      storageHandler.deleteVolume(createVolumeArgs);
+      Assert.fail("Expected the deletion to fail "
+          + "because the volume is not empty");
+    } catch (IOException ex) {
+      Assert.assertEquals(ex.getMessage(),
+          "Delete Volume failed, error:VOLUME_NOT_EMPTY");
+    }
+    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
+    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
+    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(userName));
+  }
+
+  // Create a volume and test Volume access for a different user
+  @Test(timeout = 60000)
+  public void testAccessVolume() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String[] groupName =
+        {"group" + RandomStringUtils.randomNumeric(5)};
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    createVolumeArgs.setGroups(groupName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, userName,
+        OzoneAcl.OzoneACLRights.READ_WRITE);
+    Assert.assertTrue(storageHandler.checkVolumeAccess(volumeName, userAcl));
+    OzoneAcl group = new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, groupName[0],
+        OzoneAcl.OzoneACLRights.READ);
+    Assert.assertTrue(storageHandler.checkVolumeAccess(volumeName, group));
+
+    // Create a different user and access should fail
+    String falseUserName = "user" + RandomStringUtils.randomNumeric(5);
+    OzoneAcl falseUserAcl =
+        new OzoneAcl(OzoneAcl.OzoneACLType.USER, falseUserName,
+            OzoneAcl.OzoneACLRights.READ_WRITE);
+    Assert.assertFalse(storageHandler
+        .checkVolumeAccess(volumeName, falseUserAcl));
+    // Checking access with user name and Group Type should fail
+    OzoneAcl falseGroupAcl = new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, userName,
+        OzoneAcl.OzoneACLRights.READ_WRITE);
+    Assert.assertFalse(storageHandler
+        .checkVolumeAccess(volumeName, falseGroupAcl));
+
+    // Access for acl type world should also fail
+    OzoneAcl worldAcl =
+        new OzoneAcl(OzoneAcl.OzoneACLType.WORLD, "",
+            OzoneAcl.OzoneACLRights.READ);
+    Assert.assertFalse(storageHandler.checkVolumeAccess(volumeName, worldAcl));
+
+    Assert.assertEquals(0, omMetrics.getNumVolumeCheckAccessFails());
+    Assert.assertEquals(0, omMetrics.getNumVolumeCreateFails());
+  }
+
+  @Test(timeout = 60000)
+  public void testCreateBucket() throws IOException, OzoneException {
+    long numVolumeCreateFail = omMetrics.getNumVolumeCreateFails();
+    long numBucketCreateFail = omMetrics.getNumBucketCreateFails();
+    long numBucketInfoFail = omMetrics.getNumBucketInfoFails();
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(volumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    storageHandler.createBucket(bucketArgs);
+
+    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
+        userArgs);
+    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
+    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
+    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
+    Assert.assertEquals(numVolumeCreateFail,
+        omMetrics.getNumVolumeCreateFails());
+    Assert.assertEquals(numBucketCreateFail,
+        omMetrics.getNumBucketCreateFails());
+    Assert.assertEquals(numBucketInfoFail,
+        omMetrics.getNumBucketInfoFails());
+  }
+
+  @Test(timeout = 60000)
+  public void testDeleteBucket() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(volumeArgs);
+    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    storageHandler.createBucket(bucketArgs);
+    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
+        userArgs);
+    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
+    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
+    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
+    storageHandler.deleteBucket(bucketArgs);
+    exception.expect(IOException.class);
+    exception.expectMessage("Info Bucket failed, error: BUCKET_NOT_FOUND");
+    storageHandler.getBucketInfo(getBucketArgs);
+  }
+
+  @Test(timeout = 60000)
+  public void testDeleteNonExistingBucket() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(volumeArgs);
+    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    storageHandler.createBucket(bucketArgs);
+    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
+        userArgs);
+    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
+    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
+    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
+    BucketArgs newBucketArgs = new BucketArgs(
+        volumeName, bucketName + "_invalid", userArgs);
+    exception.expect(IOException.class);
+    exception.expectMessage("Delete Bucket failed, error:BUCKET_NOT_FOUND");
+    storageHandler.deleteBucket(newBucketArgs);
+  }
+
+
+  @Test(timeout = 60000)
+  public void testDeleteNonEmptyBucket() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(volumeArgs);
+    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    storageHandler.createBucket(bucketArgs);
+    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
+        userArgs);
+    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
+    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
+    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
+    String dataString = RandomStringUtils.randomAscii(100);
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    keyArgs.setSize(100);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+    exception.expect(IOException.class);
+    exception.expectMessage("Delete Bucket failed, error:BUCKET_NOT_EMPTY");
+    storageHandler.deleteBucket(bucketArgs);
+  }
+
+  /**
+   * Basic test of both putKey and getKey from OM, as one cannot be tested
+   * without the other.
+   *
+   * @throws IOException
+   * @throws OzoneException
+   */
+  @Test
+  public void testGetKeyWriterReader() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+    long numKeyAllocates = omMetrics.getNumKeyAllocates();
+    long numKeyLookups = omMetrics.getNumKeyLookups();
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    String dataString = RandomStringUtils.randomAscii(100);
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    keyArgs.setSize(100);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+    Assert.assertEquals(1 + numKeyAllocates, omMetrics.getNumKeyAllocates());
+
+    byte[] data = new byte[dataString.length()];
+    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
+      in.read(data);
+    }
+    Assert.assertEquals(dataString, DFSUtil.bytes2String(data));
+    Assert.assertEquals(1 + numKeyLookups, omMetrics.getNumKeyLookups());
+  }
+
+  /**
+   * Test writing the same key twice. The overwrite is currently allowed on
+   * the OM side; see the note about HDFS-11922 in the test body.
+   *
+   * @throws IOException
+   * @throws OzoneException
+   */
+  @Test
+  public void testKeyOverwrite() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+    long numKeyAllocateFails = omMetrics.getNumKeyAllocateFails();
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    keyArgs.setSize(100);
+    String dataString = RandomStringUtils.randomAscii(100);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+
+    // We allow the key overwrite to succeed. Note: until HDFS-11922 is fixed,
+    // this causes a data block leak on the datanode side, because the
+    // overwrite only replaces the key on the OM; the old blocks still need to
+    // be garbage collected from the datanodes.
+    KeyArgs keyArgs2 = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    storageHandler.newKeyWriter(keyArgs2);
+    Assert
+        .assertEquals(numKeyAllocateFails, omMetrics.getNumKeyAllocateFails());
+  }
+
+  /**
+   * Test getting a non-existing key.
+   *
+   * @throws IOException
+   * @throws OzoneException
+   */
+  @Test
+  public void testGetNonExistKey() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+    long numKeyLookupFails = omMetrics.getNumKeyLookupFails();
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    // try to get the key, should fail as it hasn't been created
+    exception.expect(IOException.class);
+    exception.expectMessage("KEY_NOT_FOUND");
+    storageHandler.newKeyReader(keyArgs);
+    Assert.assertEquals(1 + numKeyLookupFails,
+        omMetrics.getNumKeyLookupFails());
+  }
+
+  /**
+   * Test deleting keys on the OM.
+   *
+   * @throws IOException
+   * @throws OzoneException
+   */
+  @Test
+  public void testDeleteKey() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+    long numKeyDeletes = omMetrics.getNumKeyDeletes();
+    long numKeyDeleteFails = omMetrics.getNumKeyDeletesFails();
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    storageHandler.createBucket(bucketArgs);
+
+    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
+    keyArgs.setSize(100);
+    String dataString = RandomStringUtils.randomAscii(100);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+
+    storageHandler.deleteKey(keyArgs);
+    Assert.assertEquals(1 + numKeyDeletes, omMetrics.getNumKeyDeletes());
+
+    // Make sure the deleted key has been moved under the deleting-key prefix.
+    MetadataStore store = cluster.getOzoneManager().
+        getMetadataManager().getStore();
+    List<Map.Entry<byte[], byte[]>> list = store.getRangeKVs(null, 10,
+        new MetadataKeyFilters.KeyPrefixFilter()
+            .addFilter(DELETING_KEY_PREFIX));
+    Assert.assertEquals(1, list.size());
+
+    // Delete the key again to test deleting non-existing key.
+    try {
+      storageHandler.deleteKey(keyArgs);
+      Assert.fail("Expected exception not thrown.");
+    } catch (IOException ioe) {
+      Assert.assertTrue(ioe.getMessage().contains("KEY_NOT_FOUND"));
+    }
+    Assert.assertEquals(1 + numKeyDeleteFails,
+        omMetrics.getNumKeyDeletesFails());
+  }
+
+  /**
+   * Test renaming a key on the OM.
+   *
+   * @throws IOException
+   * @throws OzoneException
+   */
+  @Test
+  public void testRenameKey() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+    long numKeyRenames = omMetrics.getNumKeyRenames();
+    long numKeyRenameFails = omMetrics.getNumKeyRenameFails();
+    int testRenameFails = 0;
+    int testRenames = 0;
+    IOException ioe = null;
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    storageHandler.createBucket(bucketArgs);
+
+    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
+    keyArgs.setSize(100);
+    String toKeyName = "key" + RandomStringUtils.randomNumeric(5);
+
+    // Rename from non-existent key should fail
+    try {
+      testRenames++;
+      storageHandler.renameKey(keyArgs, toKeyName);
+    } catch (IOException e) {
+      testRenameFails++;
+      ioe = e;
+    }
+    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
+
+    // Write the contents of the key to be renamed
+    String dataString = RandomStringUtils.randomAscii(100);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+
+    // Rename the key
+    toKeyName = "key" + RandomStringUtils.randomNumeric(5);
+    testRenames++;
+    storageHandler.renameKey(keyArgs, toKeyName);
+    Assert.assertEquals(numKeyRenames + testRenames,
+        omMetrics.getNumKeyRenames());
+    Assert.assertEquals(numKeyRenameFails + testRenameFails,
+        omMetrics.getNumKeyRenameFails());
+
+    // Try to get the key, should fail as it has been renamed
+    try {
+      storageHandler.newKeyReader(keyArgs);
+    } catch (IOException e) {
+      ioe = e;
+    }
+    Assert.assertTrue(ioe.getMessage().contains("KEY_NOT_FOUND"));
+
+    // Verify the contents of the renamed key
+    keyArgs = new KeyArgs(toKeyName, bucketArgs);
+    InputStream in = storageHandler.newKeyReader(keyArgs);
+    byte[] b = new byte[dataString.getBytes().length];
+    in.read(b);
+    Assert.assertEquals(new String(b), dataString);
+
+    // Rewrite the original key name; renaming it to a key that already exists should fail.
+    keyArgs = new KeyArgs(keyName, bucketArgs);
+    keyArgs.setSize(100);
+    dataString = RandomStringUtils.randomAscii(100);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+      stream.close();
+      testRenames++;
+      storageHandler.renameKey(keyArgs, toKeyName);
+    } catch (IOException e) {
+      testRenameFails++;
+      ioe = e;
+    }
+    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
+
+    // Rename to empty string should fail
+    toKeyName = "";
+    try {
+      testRenames++;
+      storageHandler.renameKey(keyArgs, toKeyName);
+    } catch (IOException e) {
+      testRenameFails++;
+      ioe = e;
+    }
+    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
+
+    // Rename from empty string should fail
+    keyArgs = new KeyArgs("", bucketArgs);
+    toKeyName = "key" + RandomStringUtils.randomNumeric(5);
+    try {
+      testRenames++;
+      storageHandler.renameKey(keyArgs, toKeyName);
+    } catch (IOException e) {
+      testRenameFails++;
+      ioe = e;
+    }
+    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
+
+    Assert.assertEquals(numKeyRenames + testRenames,
+        omMetrics.getNumKeyRenames());
+    Assert.assertEquals(numKeyRenameFails + testRenameFails,
+        omMetrics.getNumKeyRenameFails());
+  }
+
+  @Test(timeout = 60000)
+  public void testListBuckets() throws IOException, OzoneException {
+    ListBuckets result = null;
+    ListArgs listBucketArgs = null;
+
+    // Create volume - volA.
+    final String volAname = "volA";
+    VolumeArgs volAArgs = new VolumeArgs(volAname, userArgs);
+    volAArgs.setUserName("userA");
+    volAArgs.setAdminName("adminA");
+    storageHandler.createVolume(volAArgs);
+
+    // Create 20 buckets in volA for tests.
+    for (int i=0; i<10; i++) {
+      // Create "/volA/aBucket_0" to "/volA/aBucket_9" buckets in volA volume.
+      BucketArgs aBuckets = new BucketArgs(volAname,
+          "aBucket_" + i, userArgs);
+      if(i % 3 == 0) {
+        aBuckets.setStorageType(StorageType.ARCHIVE);
+      } else {
+        aBuckets.setStorageType(StorageType.DISK);
+      }
+      storageHandler.createBucket(aBuckets);
+
+      // Create "/volA/bBucket_0" to "/volA/bBucket_9" buckets in volA volume.
+      BucketArgs bBuckets = new BucketArgs(volAname,
+          "bBucket_" + i, userArgs);
+      if(i % 3 == 0) {
+        bBuckets.setStorageType(StorageType.RAM_DISK);
+      } else {
+        bBuckets.setStorageType(StorageType.SSD);
+      }
+      storageHandler.createBucket(bBuckets);
+    }
+
+    VolumeArgs volArgs = new VolumeArgs(volAname, userArgs);
+
+    // List all buckets in volA.
+    listBucketArgs = new ListArgs(volArgs, null, 100, null);
+    result = storageHandler.listBuckets(listBucketArgs);
+    Assert.assertEquals(20, result.getBuckets().size());
+    List<BucketInfo> archiveBuckets = result.getBuckets().stream()
+        .filter(item -> item.getStorageType() == StorageType.ARCHIVE)
+        .collect(Collectors.toList());
+    Assert.assertEquals(4, archiveBuckets.size());
+
+    // List buckets with prefix "aBucket".
+    listBucketArgs = new ListArgs(volArgs, "aBucket", 100, null);
+    result = storageHandler.listBuckets(listBucketArgs);
+    Assert.assertEquals(10, result.getBuckets().size());
+    Assert.assertTrue(result.getBuckets().stream()
+        .allMatch(entry -> entry.getBucketName().startsWith("aBucket")));
+
+    // List a certain number of buckets.
+    listBucketArgs = new ListArgs(volArgs, null, 3, null);
+    result = storageHandler.listBuckets(listBucketArgs);
+    Assert.assertEquals(3, result.getBuckets().size());
+    Assert.assertEquals("aBucket_0",
+        result.getBuckets().get(0).getBucketName());
+    Assert.assertEquals("aBucket_1",
+        result.getBuckets().get(1).getBucketName());
+    Assert.assertEquals("aBucket_2",
+        result.getBuckets().get(2).getBucketName());
+
+    // List a certain number of buckets from the startKey.
+    listBucketArgs = new ListArgs(volArgs, null, 2, "bBucket_3");
+    result = storageHandler.listBuckets(listBucketArgs);
+    Assert.assertEquals(2, result.getBuckets().size());
+    Assert.assertEquals("bBucket_4",
+        result.getBuckets().get(0).getBucketName());
+    Assert.assertEquals("bBucket_5",
+        result.getBuckets().get(1).getBucketName());
+
+    // Provide an invalid bucket name as start key.
+    listBucketArgs = new ListArgs(volArgs, null, 100, "unknown_bucket_name");
+    ListBuckets buckets = storageHandler.listBuckets(listBucketArgs);
+    Assert.assertEquals(buckets.getBuckets().size(), 0);
+
+    // Use all arguments.
+    listBucketArgs = new ListArgs(volArgs, "b", 5, "bBucket_7");
+    result = storageHandler.listBuckets(listBucketArgs);
+    Assert.assertEquals(2, result.getBuckets().size());
+    Assert.assertEquals("bBucket_8",
+        result.getBuckets().get(0).getBucketName());
+    Assert.assertEquals("bBucket_9",
+        result.getBuckets().get(1).getBucketName());
+
+    // Provide an invalid maxKeys argument.
+    try {
+      listBucketArgs = new ListArgs(volArgs, null, -1, null);
+      storageHandler.listBuckets(listBucketArgs);
+      Assert.fail("Expecting an error when the given"
+          + " maxKeys argument is invalid.");
+    } catch (Exception e) {
+      Assert.assertTrue(e.getMessage()
+          .contains(String.format("the value must be in range (0, %d]",
+              OzoneConsts.MAX_LISTBUCKETS_SIZE)));
+    }
+
+    // Provide an invalid volume name.
+    VolumeArgs invalidVolArgs = new VolumeArgs("invalid_name", userArgs);
+    try {
+      listBucketArgs = new ListArgs(invalidVolArgs, null, 100, null);
+      storageHandler.listBuckets(listBucketArgs);
+      Assert.fail("Expecting an error when the given volume name is invalid.");
+    } catch (Exception e) {
+      Assert.assertTrue(e instanceof IOException);
+      Assert.assertTrue(e.getMessage()
+          .contains(Status.VOLUME_NOT_FOUND.name()));
+    }
+  }
+
+  /**
+   * Test list keys.
+   * @throws IOException
+   * @throws OzoneException
+   */
+  @Test
+  public void testListKeys() throws IOException, OzoneException {
+    ListKeys result = null;
+    ListArgs listKeyArgs = null;
+
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    // Write 20 keys in bucket.
+    int numKeys = 20;
+    String keyName = "Key";
+    KeyArgs keyArgs = null;
+    for (int i = 0; i < numKeys; i++) {
+      if (i % 2 == 0) {
+        // Create /volume/bucket/aKey[0,2,4,...,18] in bucket.
+        keyArgs = new KeyArgs("a" + keyName + i, bucketArgs);
+      } else {
+        // Create /volume/bucket/bKey[1,3,5,...,19] in bucket.
+        keyArgs = new KeyArgs("b" + keyName + i, bucketArgs);
+      }
+      keyArgs.setSize(4096);
+
+      // Just for testing list keys call, so no need to write real data.
+      OutputStream stream = storageHandler.newKeyWriter(keyArgs);
+      stream.close();
+    }
+
+    // List all keys in bucket.
+    bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    listKeyArgs = new ListArgs(bucketArgs, null, 100, null);
+    result = storageHandler.listKeys(listKeyArgs);
+    Assert.assertEquals(numKeys, result.getKeyList().size());
+
+    // List keys with prefix "aKey".
+    listKeyArgs = new ListArgs(bucketArgs, "aKey", 100, null);
+    result = storageHandler.listKeys(listKeyArgs);
+    Assert.assertEquals(numKeys / 2, result.getKeyList().size());
+    Assert.assertTrue(result.getKeyList().stream()
+        .allMatch(entry -> entry.getKeyName().startsWith("aKey")));
+
+    // List a certain number of keys.
+    listKeyArgs = new ListArgs(bucketArgs, null, 3, null);
+    result = storageHandler.listKeys(listKeyArgs);
+    Assert.assertEquals(3, result.getKeyList().size());
+    Assert.assertEquals("aKey0",
+        result.getKeyList().get(0).getKeyName());
+    Assert.assertEquals("aKey10",
+        result.getKeyList().get(1).getKeyName());
+    Assert.assertEquals("aKey12",
+        result.getKeyList().get(2).getKeyName());
+
+    // List a certain number of keys from the startKey.
+    listKeyArgs = new ListArgs(bucketArgs, null, 2, "bKey1");
+    result = storageHandler.listKeys(listKeyArgs);
+    Assert.assertEquals(2, result.getKeyList().size());
+    Assert.assertEquals("bKey11",
+        result.getKeyList().get(0).getKeyName());
+    Assert.assertEquals("bKey13",
+        result.getKeyList().get(1).getKeyName());
+
+    // Provide an invalid key name as start key.
+    listKeyArgs = new ListArgs(bucketArgs, null, 100, "invalid_start_key");
+    ListKeys keys = storageHandler.listKeys(listKeyArgs);
+    Assert.assertEquals(keys.getKeyList().size(), 0);
+
+    // Provide an invalid maxKeys argument.
+    try {
+      listKeyArgs = new ListArgs(bucketArgs, null, -1, null);
+      storageHandler.listBuckets(listKeyArgs);
+      Assert.fail("Expecting an error when the given"
+          + " maxKeys argument is invalid.");
+    } catch (Exception e) {
+      GenericTestUtils.assertExceptionContains(
+          String.format("the value must be in range (0, %d]",
+              OzoneConsts.MAX_LISTKEYS_SIZE), e);
+    }
+
+    // Provide an invalid bucket name.
+    bucketArgs = new BucketArgs("invalid_bucket", createVolumeArgs);
+    try {
+      listKeyArgs = new ListArgs(bucketArgs, null, numKeys, null);
+      storageHandler.listKeys(listKeyArgs);
+      Assert.fail(
+          "Expecting an error when the given bucket name is invalid.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains(
+          Status.BUCKET_NOT_FOUND.name(), e);
+    }
+  }
+
+  @Test
+  public void testListVolumes() throws IOException, OzoneException {
+
+    String user0 = "testListVolumes-user-0";
+    String user1 = "testListVolumes-user-1";
+    String adminUser = "testListVolumes-admin";
+    ListArgs listVolumeArgs;
+    ListVolumes volumes;
+
+    // Create 10 volumes by user0 and user1
+    String[] user0vols = new String[10];
+    String[] user1vols = new String[10];
+    for (int i =0; i<10; i++) {
+      VolumeArgs createVolumeArgs;
+      String user0VolName = "Vol-" + user0 + "-" + i;
+      user0vols[i] = user0VolName;
+      createVolumeArgs = new VolumeArgs(user0VolName, userArgs);
+      createVolumeArgs.setUserName(user0);
+      createVolumeArgs.setAdminName(adminUser);
+      createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
+      storageHandler.createVolume(createVolumeArgs);
+
+      String user1VolName = "Vol-" + user1 + "-" + i;
+      user1vols[i] = user1VolName;
+      createVolumeArgs = new VolumeArgs(user1VolName, userArgs);
+      createVolumeArgs.setUserName(user1);
+      createVolumeArgs.setAdminName(adminUser);
+      createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
+      storageHandler.createVolume(createVolumeArgs);
+    }
+
+    // Test list all volumes
+    UserArgs userArgs0 = new UserArgs(user0, OzoneUtils.getRequestID(),
+        null, null, null, null);
+    listVolumeArgs = new ListArgs(userArgs0, "Vol-testListVolumes", 100, null);
+    listVolumeArgs.setRootScan(true);
+    volumes = storageHandler.listVolumes(listVolumeArgs);
+    Assert.assertEquals(20, volumes.getVolumes().size());
+
+    // Test listing all volumes that belong to a user
+    listVolumeArgs = new ListArgs(userArgs0, null, 100, null);
+    listVolumeArgs.setRootScan(false);
+    volumes = storageHandler.listVolumes(listVolumeArgs);
+    Assert.assertEquals(10, volumes.getVolumes().size());
+
+    // Test prefix
+    listVolumeArgs = new ListArgs(userArgs0,
+        "Vol-" + user0 + "-3", 100, null);
+    volumes = storageHandler.listVolumes(listVolumeArgs);
+    Assert.assertEquals(1, volumes.getVolumes().size());
+    Assert.assertEquals(user0vols[3],
+        volumes.getVolumes().get(0).getVolumeName());
+    Assert.assertEquals(user0,
+        volumes.getVolumes().get(0).getOwner().getName());
+
+    // Test list volumes by user
+    UserArgs userArgs1 = new UserArgs(user1, OzoneUtils.getRequestID(),
+        null, null, null, null);
+    listVolumeArgs = new ListArgs(userArgs1, null, 100, null);
+    listVolumeArgs.setRootScan(false);
+    volumes = storageHandler.listVolumes(listVolumeArgs);
+    Assert.assertEquals(10, volumes.getVolumes().size());
+    Assert.assertEquals(user1,
+        volumes.getVolumes().get(3).getOwner().getName());
+
+    // Make sure all available fields are returned
+    final String user0vol4 = "Vol-" + user0 + "-4";
+    final String user0vol5 = "Vol-" + user0 + "-5";
+    listVolumeArgs = new ListArgs(userArgs0, null, 1, user0vol4);
+    listVolumeArgs.setRootScan(false);
+    volumes = storageHandler.listVolumes(listVolumeArgs);
+    Assert.assertEquals(1, volumes.getVolumes().size());
+    Assert.assertEquals(user0,
+        volumes.getVolumes().get(0).getOwner().getName());
+    Assert.assertEquals(user0vol5,
+        volumes.getVolumes().get(0).getVolumeName());
+    Assert.assertEquals(5,
+        volumes.getVolumes().get(0).getQuota().getSize());
+    Assert.assertEquals(OzoneQuota.Units.GB,
+        volumes.getVolumes().get(0).getQuota().getUnit());
+
+    // A user that does not own any volumes
+    UserArgs userArgsX = new UserArgs("unknownUser", OzoneUtils.getRequestID(),
+        null, null, null, null);
+    listVolumeArgs = new ListArgs(userArgsX, null, 100, null);
+    listVolumeArgs.setRootScan(false);
+    volumes = storageHandler.listVolumes(listVolumeArgs);
+    Assert.assertEquals(0, volumes.getVolumes().size());
+  }
+
+  /**
+   * Test get key information.
+   *
+   * @throws IOException
+   * @throws OzoneException
+   */
+  @Test
+  public void testGetKeyInfo() throws IOException,
+      OzoneException, ParseException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    long currentTime = Time.now();
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    String keyName = "testKey";
+    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
+    keyArgs.setSize(4096);
+
+
+    OutputStream stream = storageHandler.newKeyWriter(keyArgs);
+    stream.close();
+
+    KeyInfo keyInfo = storageHandler.getKeyInfo(keyArgs);
+    // Compare the times at second granularity, since re-parsing the date
+    // string into milliseconds loses precision.
+    Assert.assertTrue(
+        (HddsClientUtils.formatDateTime(keyInfo.getCreatedOn()) / 1000) >= (
+            currentTime / 1000));
+    Assert.assertTrue(
+        (HddsClientUtils.formatDateTime(keyInfo.getModifiedOn()) / 1000) >= (
+            currentTime / 1000));
+    Assert.assertEquals(keyName, keyInfo.getKeyName());
+    // Without any data written, the size should be 0.
+    Assert.assertEquals(0, keyInfo.getSize());
+  }
+
+  /**
+   * Test that the write can proceed without having to set the right size.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testWriteSize() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    String dataString = RandomStringUtils.randomAscii(100);
+    // write a key without specifying size at all
+    String keyName = "testKey";
+    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+    byte[] data = new byte[dataString.length()];
+    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
+      in.read(data);
+    }
+    Assert.assertEquals(dataString, DFSUtil.bytes2String(data));
+
+    // Write a key with a declared size, but write more data than that size.
+    String keyName1 = "testKey1";
+    KeyArgs keyArgs1 = new KeyArgs(keyName1, bucketArgs);
+    keyArgs1.setSize(30);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs1)) {
+      stream.write(dataString.getBytes());
+    }
+    byte[] data1 = new byte[dataString.length()];
+    try (InputStream in = storageHandler.newKeyReader(keyArgs1)) {
+      in.read(data1);
+    }
+    Assert.assertEquals(dataString, DFSUtil.bytes2String(data1));
+  }
+
+  /**
+   * Tests the RPC call for getting scmId and clusterId from SCM.
+   * @throws IOException
+   */
+  @Test
+  public void testGetScmInfo() throws IOException {
+    ScmInfo info = cluster.getOzoneManager().getScmInfo();
+    Assert.assertEquals(clusterId, info.getClusterId());
+    Assert.assertEquals(scmId, info.getScmId());
+  }
+
+
+  @Test
+  public void testExpiredOpenKey() throws Exception {
+    BackgroundService openKeyCleanUpService = ((KeyManagerImpl)cluster
+        .getOzoneManager().getKeyManager()).getOpenKeyCleanupService();
+
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    // open some keys.
+
+    KeyArgs keyArgs1 = new KeyArgs("testKey1", bucketArgs);
+    KeyArgs keyArgs2 = new KeyArgs("testKey2", bucketArgs);
+    KeyArgs keyArgs3 = new KeyArgs("testKey3", bucketArgs);
+    KeyArgs keyArgs4 = new KeyArgs("testKey4", bucketArgs);
+    List<BlockGroup> openKeys;
+    storageHandler.newKeyWriter(keyArgs1);
+    storageHandler.newKeyWriter(keyArgs2);
+    storageHandler.newKeyWriter(keyArgs3);
+    storageHandler.newKeyWriter(keyArgs4);
+
+    Set<String> expected = Stream.of(
+        "testKey1", "testKey2", "testKey3", "testKey4")
+        .collect(Collectors.toSet());
+
+    // Now all k1-k4 should be in open state, so ExpiredOpenKeys should not
+    // contain these values.
+    openKeys = cluster.getOzoneManager()
+        .getMetadataManager().getExpiredOpenKeys();
+
+    for (BlockGroup bg : openKeys) {
+      String[] subs = bg.getGroupID().split("/");
+      String keyName = subs[subs.length - 1];
+      Assert.assertFalse(expected.contains(keyName));
+    }
+
+    Thread.sleep(2000);
+    // Now all k1-k4 should be in ExpiredOpenKeys
+    openKeys = cluster.getOzoneManager()
+        .getMetadataManager().getExpiredOpenKeys();
+    for (BlockGroup bg : openKeys) {
+      String[] subs = bg.getGroupID().split("/");
+      String keyName = subs[subs.length - 1];
+      if (expected.contains(keyName)) {
+        expected.remove(keyName);
+      }
+    }
+    Assert.assertEquals(0, expected.size());
+
+    KeyArgs keyArgs5 = new KeyArgs("testKey5", bucketArgs);
+    storageHandler.newKeyWriter(keyArgs5);
+
+    openKeyCleanUpService.triggerBackgroundTaskForTesting();
+    Thread.sleep(2000);
+    // Now all k1-k4 should have been removed by the clean-up task; only k5
+    // should be present in ExpiredOpenKeys.
+    openKeys =
+        cluster.getOzoneManager().getMetadataManager().getExpiredOpenKeys();
+    System.out.println(openKeys);
+    boolean key5found = false;
+    Set<String> removed = Stream.of(
+        "testKey1", "testKey2", "testKey3", "testKey4")
+        .collect(Collectors.toSet());
+    for (BlockGroup bg : openKeys) {
+      String[] subs = bg.getGroupID().split("/");
+      String keyName = subs[subs.length - 1];
+      Assert.assertFalse(removed.contains(keyName));
+      if (keyName.equals("testKey5")) {
+        key5found = true;
+      }
+    }
+    Assert.assertTrue(key5found);
+  }
+
+  /**
+   * Tests the OM Initialization.
+   * @throws IOException
+   */
+  @Test
+  public void testOmInitialization() throws IOException {
+    // Read the version file info from OM version file
+    OMStorage omStorage = cluster.getOzoneManager().getOmStorage();
+    SCMStorage scmStorage = new SCMStorage(conf);
+    // asserts whether cluster Id and SCM ID are properly set in SCM Version
+    // file.
+    Assert.assertEquals(clusterId, scmStorage.getClusterID());
+    Assert.assertEquals(scmId, scmStorage.getScmId());
+    // asserts whether OM Id is properly set in OM Version file.
+    Assert.assertEquals(omId, omStorage.getOmId());
+    // asserts whether the SCM info is correct in OM Version file.
+    Assert.assertEquals(clusterId, omStorage.getClusterID());
+    Assert.assertEquals(scmId, omStorage.getScmId());
+  }
+
+  /**
+   * Tests OM initialization failure scenarios.
+   * @throws Exception
+   */
+  @Test
+  public void testOmInitializationFailure() throws Exception {
+    OzoneConfiguration config = new OzoneConfiguration();
+    final String path =
+        GenericTestUtils.getTempPath(UUID.randomUUID().toString());
+    Path metaDirPath = Paths.get(path, "om-meta");
+    config.set(OzoneConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
+    config.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    config.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
+    config.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
+        conf.get(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY));
+    exception.expect(OMException.class);
+    exception.expectMessage("OM not initialized.");
+    OzoneManager.createOm(null, config);
+    OMStorage omStore = new OMStorage(config);
+    omStore.setClusterId("testClusterId");
+    omStore.setScmId("testScmId");
+    // writes the version file properties
+    omStore.initialize();
+    exception.expect(OMException.class);
+    exception.expectMessage("SCM version info mismatch.");
+    OzoneManager.createOm(null, conf);
+  }
+
+  @Test
+  public void testGetServiceList() throws IOException {
+    long numGetServiceListCalls = omMetrics.getNumGetServiceLists();
+    List<ServiceInfo> services = cluster.getOzoneManager().getServiceList();
+
+    Assert.assertEquals(numGetServiceListCalls + 1,
+        omMetrics.getNumGetServiceLists());
+
+    ServiceInfo omInfo = services.stream().filter(
+        a -> a.getNodeType().equals(HddsProtos.NodeType.OM))
+        .collect(Collectors.toList()).get(0);
+    InetSocketAddress omAddress = new InetSocketAddress(omInfo.getHostname(),
+        omInfo.getPort(ServicePort.Type.RPC));
+    Assert.assertEquals(NetUtils.createSocketAddr(
+        conf.get(OZONE_OM_ADDRESS_KEY)), omAddress);
+
+    ServiceInfo scmInfo = services.stream().filter(
+        a -> a.getNodeType().equals(HddsProtos.NodeType.SCM))
+        .collect(Collectors.toList()).get(0);
+    InetSocketAddress scmAddress = new InetSocketAddress(scmInfo.getHostname(),
+        scmInfo.getPort(ServicePort.Type.RPC));
+    Assert.assertEquals(NetUtils.createSocketAddr(
+        conf.get(OZONE_SCM_CLIENT_ADDRESS_KEY)), scmAddress);
+  }
+}
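
A note on the timing above: testExpiredOpenKey relies on fixed Thread.sleep(2000)
pauses to let the keys cross the open-key expiry threshold, which can make the
test timing-sensitive. A minimal condition-based alternative is sketched below;
it assumes the cluster handle, the Set<String> of expected key names from the
test, and the corresponding imports are in scope, and the polling/timeout values
are illustrative only, not taken from the patch:

    // Poll until every expected key name is reported by getExpiredOpenKeys(),
    // instead of sleeping for a fixed interval. Assumes `cluster` and the
    // Set<String> `expected` from the surrounding test; the 100 ms poll
    // interval and 10 s timeout are example values.
    GenericTestUtils.waitFor(() -> {
      try {
        Set<String> remaining = new HashSet<>(expected);
        for (BlockGroup bg : cluster.getOzoneManager()
            .getMetadataManager().getExpiredOpenKeys()) {
          String[] subs = bg.getGroupID().split("/");
          remaining.remove(subs[subs.length - 1]);
        }
        return remaining.isEmpty();
      } catch (Exception e) {
        return false;
      }
    }, 100, 10000);

This mirrors the GenericTestUtils.waitFor pattern already used in
TestKeys#testDeleteKey in the diff further down.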

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java
new file mode 100644
index 0000000..8168d27
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.core.type.TypeReference;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ServicePort;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.http.util.EntityUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
+import static org.apache.hadoop.ozone.OmUtils.getOmAddressForClients;
+
+/**
+ * This class is to test the REST interface exposed by OzoneManager.
+ */
+public class TestOzoneManagerRestInterface {
+
+  private static MiniOzoneCluster cluster;
+  private static OzoneConfiguration conf;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    conf = new OzoneConfiguration();
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testGetServiceList() throws Exception {
+    OzoneManagerHttpServer server =
+        cluster.getOzoneManager().getHttpServer();
+    HttpClient client = HttpClients.createDefault();
+    String connectionUri = "http://" +
+        NetUtils.getHostPortString(server.getHttpAddress());
+    HttpGet httpGet = new HttpGet(connectionUri + "/serviceList");
+    HttpResponse response = client.execute(httpGet);
+    String serviceListJson = EntityUtils.toString(response.getEntity());
+
+    ObjectMapper objectMapper = new ObjectMapper();
+    TypeReference<List<ServiceInfo>> serviceInfoReference =
+        new TypeReference<List<ServiceInfo>>() {};
+    List<ServiceInfo> serviceInfos = objectMapper.readValue(
+        serviceListJson, serviceInfoReference);
+    Map<HddsProtos.NodeType, ServiceInfo> serviceMap = new HashMap<>();
+    for (ServiceInfo serviceInfo : serviceInfos) {
+      serviceMap.put(serviceInfo.getNodeType(), serviceInfo);
+    }
+
+    InetSocketAddress omAddress =
+        getOmAddressForClients(conf);
+    ServiceInfo omInfo = serviceMap.get(HddsProtos.NodeType.OM);
+
+    Assert.assertEquals(omAddress.getHostName(), omInfo.getHostname());
+    Assert.assertEquals(omAddress.getPort(),
+        omInfo.getPort(ServicePort.Type.RPC));
+    Assert.assertEquals(server.getHttpAddress().getPort(),
+        omInfo.getPort(ServicePort.Type.HTTP));
+
+    InetSocketAddress scmAddress =
+        getScmAddressForClients(conf);
+    ServiceInfo scmInfo = serviceMap.get(HddsProtos.NodeType.SCM);
+
+    Assert.assertEquals(scmAddress.getHostName(), scmInfo.getHostname());
+    Assert.assertEquals(scmAddress.getPort(),
+        scmInfo.getPort(ServicePort.Type.RPC));
+
+    ServiceInfo datanodeInfo = serviceMap.get(HddsProtos.NodeType.DATANODE);
+    DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
+        .getDatanodeDetails();
+    Assert.assertEquals(datanodeDetails.getHostName(),
+        datanodeInfo.getHostname());
+
+    Map<ServicePort.Type, Integer> ports = datanodeInfo.getPorts();
+    for (ServicePort.Type type : ports.keySet()) {
+      switch (type) {
+      case HTTP:
+      case HTTPS:
+        Assert.assertEquals(
+            datanodeDetails.getPort(DatanodeDetails.Port.Name.REST).getValue(),
+            ports.get(type));
+        break;
+      default:
+        // The OM only reports the datanode's info port details
+        // (HTTP or HTTPS); any other port type reported here is
+        // unexpected as of now.
+        Assert.fail();
+        break;
+      }
+    }
+  }
+
+}
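
For reference, the /serviceList endpoint exercised above can be queried by any
HTTP client outside the MiniOzoneCluster harness. A minimal sketch, assuming
Jackson is on the classpath and the same imports as the test above; the host
and port below are placeholders and the real value comes from the OM's
configured HTTP address:

    // Fetch and deserialize the OM service list, mirroring what the test
    // above verifies. The URL is a placeholder, not a default taken from
    // this patch.
    URL url = new URL("http://om-host:9874/serviceList");
    try (InputStream in = url.openStream()) {
      List<ServiceInfo> services = new ObjectMapper()
          .readValue(in, new TypeReference<List<ServiceInfo>>() { });
      for (ServiceInfo info : services) {
        System.out.println(info.getNodeType() + " -> " + info.getHostname());
      }
    }

The ObjectMapper/TypeReference usage and the ServiceInfo accessors are the same
ones used by the test above.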

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index ed8f0d5..5082870 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -59,7 +59,7 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.client.rest.RestClient;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.apache.hadoop.ozone.web.response.BucketInfo;
@@ -167,23 +167,23 @@ public class TestOzoneShell {
     System.setOut(new PrintStream(out));
     System.setErr(new PrintStream(err));
     if(clientProtocol.equals(RestClient.class)) {
-      String hostName = cluster.getKeySpaceManager().getHttpServer()
+      String hostName = cluster.getOzoneManager().getHttpServer()
           .getHttpAddress().getHostName();
       int port = cluster
-          .getKeySpaceManager().getHttpServer().getHttpAddress().getPort();
+          .getOzoneManager().getHttpServer().getHttpAddress().getPort();
       url = String.format("http://" + hostName + ":" + port);
     } else {
       List<ServiceInfo> services = null;
       try {
-        services = cluster.getKeySpaceManager().getServiceList();
+        services = cluster.getOzoneManager().getServiceList();
       } catch (IOException e) {
-        LOG.error("Could not get service list from KSM");
+        LOG.error("Could not get service list from OM");
       }
       String hostName = services.stream().filter(
-          a -> a.getNodeType().equals(HddsProtos.NodeType.KSM))
+          a -> a.getNodeType().equals(HddsProtos.NodeType.OM))
           .collect(Collectors.toList()).get(0).getHostname();
 
-      String port = cluster.getKeySpaceManager().getRpcPort();
+      String port = cluster.getOzoneManager().getRpcPort();
       url = String.format("o3://" + hostName + ":" + port);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
index b4ed2b1..1a1f37c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacem
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.scm.cli.SQLCLI;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
@@ -113,7 +112,7 @@ public class TestContainerSQLCli {
     cluster.waitForClusterToBeReady();
     datanodeIpAddress = cluster.getHddsDatanodes().get(0)
         .getDatanodeDetails().getIpAddress();
-    cluster.getKeySpaceManager().stop();
+    cluster.getOzoneManager().stop();
     cluster.getStorageContainerManager().stop();
 
     nodeManager = cluster.getStorageContainerManager().getScmNodeManager();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java
index 0e61391..e592d56 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java
@@ -90,7 +90,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
   @Test
   public void testCreateVolumes() throws IOException {
     super.testCreateVolumes(port);
-    Assert.assertEquals(0, cluster.getKeySpaceManager()
+    Assert.assertEquals(0, cluster.getOzoneManager()
         .getMetrics().getNumVolumeCreateFails());
   }
 
@@ -102,7 +102,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
   @Test
   public void testCreateVolumesWithQuota() throws IOException {
     super.testCreateVolumesWithQuota(port);
-    Assert.assertEquals(0, cluster.getKeySpaceManager()
+    Assert.assertEquals(0, cluster.getOzoneManager()
         .getMetrics().getNumVolumeCreateFails());
   }
 
@@ -114,7 +114,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
   @Test
   public void testCreateVolumesWithInvalidQuota() throws IOException {
     super.testCreateVolumesWithInvalidQuota(port);
-    Assert.assertEquals(0, cluster.getKeySpaceManager()
+    Assert.assertEquals(0, cluster.getOzoneManager()
         .getMetrics().getNumVolumeCreateFails());
   }
 
@@ -128,7 +128,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
   @Test
   public void testCreateVolumesWithInvalidUser() throws IOException {
     super.testCreateVolumesWithInvalidUser(port);
-    Assert.assertEquals(0, cluster.getKeySpaceManager()
+    Assert.assertEquals(0, cluster.getOzoneManager()
         .getMetrics().getNumVolumeCreateFails());
   }
 
@@ -143,7 +143,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
   @Test
   public void testCreateVolumesWithOutAdminRights() throws IOException {
     super.testCreateVolumesWithOutAdminRights(port);
-    Assert.assertEquals(0, cluster.getKeySpaceManager()
+    Assert.assertEquals(0, cluster.getOzoneManager()
         .getMetrics().getNumVolumeCreateFails());
   }
 
@@ -155,7 +155,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
   @Test
   public void testCreateVolumesInLoop() throws IOException {
     super.testCreateVolumesInLoop(port);
-    Assert.assertEquals(0, cluster.getKeySpaceManager()
+    Assert.assertEquals(0, cluster.getOzoneManager()
         .getMetrics().getNumVolumeCreateFails());
   }
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index b86c577..a95bd0e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -48,13 +48,13 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.ksm.KeySpaceManager;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .Status;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
@@ -644,15 +644,15 @@ public class TestKeys {
     }
   }
 
-  private int countKsmKeys(KeySpaceManager ksm) throws IOException {
+  private int countOmKeys(OzoneManager om) throws IOException {
     int totalCount = 0;
-    List<KsmVolumeArgs> volumes =
-        ksm.listAllVolumes(null, null, Integer.MAX_VALUE);
-    for (KsmVolumeArgs volume : volumes) {
-      List<KsmBucketInfo> buckets =
-          ksm.listBuckets(volume.getVolume(), null, null, Integer.MAX_VALUE);
-      for (KsmBucketInfo bucket : buckets) {
-        List<KsmKeyInfo> keys = ksm.listKeys(bucket.getVolumeName(),
+    List<OmVolumeArgs> volumes =
+        om.listAllVolumes(null, null, Integer.MAX_VALUE);
+    for (OmVolumeArgs volume : volumes) {
+      List<OmBucketInfo> buckets =
+          om.listBuckets(volume.getVolume(), null, null, Integer.MAX_VALUE);
+      for (OmBucketInfo bucket : buckets) {
+        List<OmKeyInfo> keys = om.listKeys(bucket.getVolumeName(),
             bucket.getBucketName(), null, null, Integer.MAX_VALUE);
         totalCount += keys.size();
       }
@@ -662,10 +662,10 @@ public class TestKeys {
 
   @Test
   public void testDeleteKey() throws Exception {
-    KeySpaceManager ksm = ozoneCluster.getKeySpaceManager();
+    OzoneManager ozoneManager = ozoneCluster.getOzoneManager();
     // To avoid interference from other test cases,
     // we collect number of existing keys at the beginning
-    int numOfExistedKeys = countKsmKeys(ksm);
+    int numOfExistedKeys = countOmKeys(ozoneManager);
 
     // Keep tracking bucket keys info while creating them
     PutHelper helper = new PutHelper(client, path);
@@ -689,15 +689,15 @@ public class TestKeys {
     // count the total number of created keys.
     Set<Pair<String, String>> buckets = bucketKeys.getAllBuckets();
     for (Pair<String, String> buk : buckets) {
-      List<KsmKeyInfo> createdKeys =
-          ksm.listKeys(buk.getKey(), buk.getValue(), null, null, 20);
+      List<OmKeyInfo> createdKeys =
+          ozoneManager.listKeys(buk.getKey(), buk.getValue(), null, null, 20);
 
       // Memorize chunks that has been created,
       // so we can verify actual deletions at DN side later.
-      for (KsmKeyInfo keyInfo : createdKeys) {
-        List<KsmKeyLocationInfo> locations =
+      for (OmKeyInfo keyInfo : createdKeys) {
+        List<OmKeyLocationInfo> locations =
             keyInfo.getLatestVersionLocations().getLocationList();
-        for (KsmKeyLocationInfo location : locations) {
+        for (OmKeyLocationInfo location : locations) {
           KeyData keyData = new KeyData(location.getBlockID());
           KeyData blockInfo = cm.getContainerManager()
               .getKeyManager().getKey(keyData);
@@ -721,9 +721,9 @@ public class TestKeys {
     // Ensure all keys are created.
     Assert.assertEquals(20, numOfCreatedKeys);
 
-    // Ensure all keys are visible from KSM.
+    // Ensure all keys are visible from OM.
     // Total number should be numOfCreated + numOfExisted
-    Assert.assertEquals(20 + numOfExistedKeys, countKsmKeys(ksm));
+    Assert.assertEquals(20 + numOfExistedKeys, countOmKeys(ozoneManager));
 
     // Delete 10 keys
     int delCount = 20;
@@ -732,21 +732,21 @@ public class TestKeys {
       List<String> bks = bucketKeys.getBucketKeys(bucketInfo.getValue());
       for (String keyName : bks) {
         if (delCount > 0) {
-          KsmKeyArgs arg =
-              new KsmKeyArgs.Builder().setVolumeName(bucketInfo.getKey())
+          OmKeyArgs arg =
+              new OmKeyArgs.Builder().setVolumeName(bucketInfo.getKey())
                   .setBucketName(bucketInfo.getValue()).setKeyName(keyName)
                   .build();
-          ksm.deleteKey(arg);
+          ozoneManager.deleteKey(arg);
           delCount--;
         }
       }
     }
 
-    // It should be pretty quick that keys are removed from KSM namespace,
+    // It should be pretty quick that keys are removed from OM namespace,
     // because actual deletion happens in async mode.
     GenericTestUtils.waitFor(() -> {
       try {
-        int num = countKsmKeys(ksm);
+        int num = countOmKeys(ozoneManager);
         return num == (numOfExistedKeys);
       } catch (IOException e) {
         return false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/resources/webapps/ksm/.gitkeep
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/resources/webapps/ksm/.gitkeep b/hadoop-ozone/integration-test/src/test/resources/webapps/ksm/.gitkeep
deleted file mode 100644
index 09697dc..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/webapps/ksm/.gitkeep
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep b/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep
new file mode 100644
index 0000000..09697dc
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep
@@ -0,0 +1,15 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
index 3128d31..2200cd8 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.ozone.KsmUtils.getKsmAddress;
+import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.*;
 import static com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
 import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE;
@@ -34,9 +34,8 @@ import com.sun.jersey.api.container.ContainerFactory;
 import com.sun.jersey.api.core.ApplicationAdapter;
 
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.ksm.protocolPB
-    .KeySpaceManagerProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.ObjectStoreApplication;
 import org.apache.hadoop.ozone.web.handlers.ServiceFilter;
@@ -72,8 +71,8 @@ public final class ObjectStoreHandler implements Closeable {
       LoggerFactory.getLogger(ObjectStoreHandler.class);
 
   private final ObjectStoreJerseyContainer objectStoreJerseyContainer;
-  private final KeySpaceManagerProtocolClientSideTranslatorPB
-      keySpaceManagerClient;
+  private final OzoneManagerProtocolClientSideTranslatorPB
+      ozoneManagerClient;
   private final StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
   private final ScmBlockLocationProtocolClientSideTranslatorPB
@@ -119,28 +118,28 @@ public final class ObjectStoreHandler implements Closeable {
                   NetUtils.getDefaultSocketFactory(conf),
                   Client.getRpcTimeout(conf)));
 
-      RPC.setProtocolEngine(conf, KeySpaceManagerProtocolPB.class,
+      RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
           ProtobufRpcEngine.class);
-      long ksmVersion =
-          RPC.getProtocolVersion(KeySpaceManagerProtocolPB.class);
-      InetSocketAddress ksmAddress = getKsmAddress(conf);
-      this.keySpaceManagerClient =
-          new KeySpaceManagerProtocolClientSideTranslatorPB(
-              RPC.getProxy(KeySpaceManagerProtocolPB.class, ksmVersion,
-              ksmAddress, UserGroupInformation.getCurrentUser(), conf,
+      long omVersion =
+          RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
+      InetSocketAddress omAddress = getOmAddress(conf);
+      this.ozoneManagerClient =
+          new OzoneManagerProtocolClientSideTranslatorPB(
+              RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
+                  omAddress, UserGroupInformation.getCurrentUser(), conf,
               NetUtils.getDefaultSocketFactory(conf),
               Client.getRpcTimeout(conf)));
 
       storageHandler = new DistributedStorageHandler(
           new OzoneConfiguration(conf),
           this.storageContainerLocationClient,
-          this.keySpaceManagerClient);
+          this.ozoneManagerClient);
     } else {
       if (OzoneConsts.OZONE_HANDLER_LOCAL.equalsIgnoreCase(shType)) {
         storageHandler = new LocalStorageHandler(conf);
         this.storageContainerLocationClient = null;
         this.scmBlockLocationClient = null;
-        this.keySpaceManagerClient = null;
+        this.ozoneManagerClient = null;
       } else {
         throw new IllegalArgumentException(
             String.format("Unrecognized value for %s: %s,"
@@ -186,6 +185,6 @@ public final class ObjectStoreHandler implements Closeable {
     storageHandler.close();
     IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient);
     IOUtils.cleanupWithLogger(LOG, scmBlockLocationClient);
-    IOUtils.cleanupWithLogger(LOG, keySpaceManagerClient);
+    IOUtils.cleanupWithLogger(LOG, ozoneManagerClient);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
index ef0293e..ad48787 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.web.handlers;
 import org.apache.commons.codec.binary.Base64;
 
 import org.apache.hadoop.ozone.OzoneRestUtils;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.client.rest.headers.Header;
@@ -102,7 +102,7 @@ public abstract class KeyProcessTemplate {
       LOG.error("IOException:", fsExp);
       // Map KEY_NOT_FOUND to INVALID_KEY
       if (fsExp.getMessage().endsWith(
-          KeySpaceManagerProtocolProtos.Status.KEY_NOT_FOUND.name())) {
+          OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND.name())) {
         throw ErrorTable.newError(ErrorTable.INVALID_KEY, userArgs, fsExp);
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java
index 1d98400..fb95bb9 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java
@@ -30,7 +30,7 @@ import java.nio.file.NoSuchFileException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ozone.OzoneRestUtils;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
 import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.interfaces.UserAuth;
@@ -135,7 +135,7 @@ public abstract class VolumeProcessTemplate {
     OzoneException exp = null;
 
     if ((fsExp != null && fsExp.getMessage().endsWith(
-        KeySpaceManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS.name()))
+        OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS.name()))
         || fsExp instanceof FileAlreadyExistsException) {
       exp = ErrorTable
           .newError(ErrorTable.VOLUME_ALREADY_EXISTS, reqID, volume, hostName);




[10/50] [abbrv] hadoop git commit: YARN-7451. Add missing tests to verify the presence of custom resources of RM apps and scheduler webservice endpoints (snemeth via rkanter)

Posted by bh...@apache.org.
YARN-7451. Add missing tests to verify the presence of custom resources of RM apps and scheduler webservice endpoints (snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a129e3e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a129e3e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a129e3e7

Branch: refs/heads/HDDS-48
Commit: a129e3e74e16ed039d637dc1499dc3e5df317d94
Parents: 9edc74f
Author: Robert Kanter <rk...@apache.org>
Authored: Thu Jul 5 10:54:19 2018 -0700
Committer: Sunil G <su...@apache.org>
Committed: Fri Jul 6 11:04:00 2018 -0700

----------------------------------------------------------------------
 .../resourcemanager/webapp/dao/AppInfo.java     |   2 +-
 .../webapp/dao/SchedulerInfo.java               |   8 +-
 .../fair/TestFairSchedulerConfiguration.java    |   9 +-
 .../webapp/TestRMWebServices.java               |  31 ++-
 .../webapp/TestRMWebServicesApps.java           |  14 +-
 ...estRMWebServicesAppsCustomResourceTypes.java | 242 +++++++++++++++++
 .../webapp/TestRMWebServicesCapacitySched.java  |  30 +-
 .../TestRMWebServicesConfigurationMutation.java |   5 +
 .../webapp/TestRMWebServicesFairScheduler.java  |  95 +++----
 .../TestRMWebServicesSchedulerActivities.java   |   2 +-
 ...ustomResourceTypesConfigurationProvider.java | 138 ++++++++++
 .../FairSchedulerJsonVerifications.java         | 139 ++++++++++
 .../FairSchedulerXmlVerifications.java          | 153 +++++++++++
 ...ervicesFairSchedulerCustomResourceTypes.java | 271 +++++++++++++++++++
 .../webapp/helper/AppInfoJsonVerifications.java | 123 +++++++++
 .../webapp/helper/AppInfoXmlVerifications.java  | 132 +++++++++
 .../webapp/helper/BufferedClientResponse.java   |  57 ++++
 .../helper/JsonCustomResourceTypeTestcase.java  |  77 ++++++
 .../ResourceRequestsJsonVerifications.java      | 252 +++++++++++++++++
 .../ResourceRequestsXmlVerifications.java       | 215 +++++++++++++++
 .../helper/XmlCustomResourceTypeTestCase.java   | 112 ++++++++
 21 files changed, 2020 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index d47f13d..9d82bc7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -479,7 +479,7 @@ public class AppInfo {
   public int getNumNonAMContainersPreempted() {
     return numNonAMContainerPreempted;
   }
-  
+
   public int getNumAMContainersPreempted() {
     return numAMContainerPreempted;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
index 81491b1..163f707 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
@@ -41,8 +41,9 @@ public class SchedulerInfo {
   protected EnumSet<SchedulerResourceTypes> schedulingResourceTypes;
   protected int maximumClusterPriority;
 
+  // JAXB needs this
   public SchedulerInfo() {
-  } // JAXB needs this
+  }
 
   public SchedulerInfo(final ResourceManager rm) {
     ResourceScheduler rs = rm.getResourceScheduler();
@@ -74,7 +75,10 @@ public class SchedulerInfo {
   }
 
   public String getSchedulerResourceTypes() {
-    return Arrays.toString(minAllocResource.getResource().getResources());
+    if (minAllocResource != null) {
+      return Arrays.toString(minAllocResource.getResource().getResources());
+    }
+    return null;
   }
 
   public int getMaxClusterLevelAppPriority() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 76a5af5..70f83ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -48,6 +48,9 @@ import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Assert;
 import org.junit.Test;
 
+/**
+ * Tests fair scheduler configuration.
+ */
 public class TestFairSchedulerConfiguration {
 
   private static final String A_CUSTOM_RESOURCE = "a-custom-resource";
@@ -242,12 +245,12 @@ public class TestFairSchedulerConfiguration {
         parseResourceConfigValue(" vcores = 75 % , memory-mb = 40 % , "
             + "test1 = 50 % ").getResource(clusterResource));
   }
-  
+
   @Test(expected = AllocationConfigurationException.class)
   public void testNoUnits() throws Exception {
     parseResourceConfigValue("1024");
   }
-  
+
   @Test(expected = AllocationConfigurationException.class)
   public void testOnlyMemory() throws Exception {
     parseResourceConfigValue("1024mb");
@@ -257,7 +260,7 @@ public class TestFairSchedulerConfiguration {
   public void testOnlyCPU() throws Exception {
     parseResourceConfigValue("1024vcores");
   }
-  
+
   @Test(expected = AllocationConfigurationException.class)
   public void testGibberish() throws Exception {
     parseResourceConfigValue("1o24vc0res");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 0702d65..3902889 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -53,11 +53,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
-import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
-import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.*;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -76,11 +72,12 @@ import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
-import org.eclipse.jetty.server.Response;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import org.w3c.dom.NodeList;
@@ -96,6 +93,8 @@ import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
 public class TestRMWebServices extends JerseyTestBase {
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestRMWebServices.class);
 
   private static MockRM rm;
 
@@ -472,19 +471,19 @@ public class TestRMWebServices extends JerseyTestBase {
     QueueMetrics metrics = rs.getRootQueueMetrics();
     ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
 
-    long totalMBExpect = 
+    long totalMBExpect =
         metrics.getAvailableMB() + metrics.getAllocatedMB();
-    long totalVirtualCoresExpect = 
+    long totalVirtualCoresExpect =
         metrics.getAvailableVirtualCores() + metrics.getAllocatedVirtualCores();
-    assertEquals("appsSubmitted doesn't match", 
+    assertEquals("appsSubmitted doesn't match",
         metrics.getAppsSubmitted(), submittedApps);
-    assertEquals("appsCompleted doesn't match", 
+    assertEquals("appsCompleted doesn't match",
         metrics.getAppsCompleted(), completedApps);
     assertEquals("reservedMB doesn't match",
         metrics.getReservedMB(), reservedMB);
-    assertEquals("availableMB doesn't match", 
+    assertEquals("availableMB doesn't match",
         metrics.getAvailableMB(), availableMB);
-    assertEquals("allocatedMB doesn't match", 
+    assertEquals("allocatedMB doesn't match",
         metrics.getAllocatedMB(), allocMB);
     assertEquals("reservedVirtualCores doesn't match",
         metrics.getReservedVirtualCores(), reservedVirtualCores);
@@ -597,11 +596,13 @@ public class TestRMWebServices extends JerseyTestBase {
 
   public void verifyClusterSchedulerFifo(JSONObject json) throws JSONException,
       Exception {
-    assertEquals("incorrect number of elements", 1, json.length());
+    assertEquals("incorrect number of elements in: " + json, 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
-    assertEquals("incorrect number of elements", 1, info.length());
+    assertEquals("incorrect number of elements in: " + info, 1, info.length());
     info = info.getJSONObject("schedulerInfo");
-    assertEquals("incorrect number of elements", 11, info.length());
+
+    LOG.debug("schedulerInfo: {}", info);
+    assertEquals("incorrect number of elements in: " + info, 11, info.length());
 
     verifyClusterSchedulerFifoGeneric(info.getString("type"),
         info.getString("qstate"), (float) info.getDouble("capacity"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index 6c6f400..15f94e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -79,7 +79,7 @@ import com.sun.jersey.test.framework.WebAppDescriptor;
 public class TestRMWebServicesApps extends JerseyTestBase {
 
   private static MockRM rm;
-  
+
   private static final int CONTAINER_MB = 1024;
 
   private static class WebServletModule extends ServletModule {
@@ -324,7 +324,7 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     assertEquals("incorrect number of elements", 1, apps.length());
     array = apps.getJSONArray("app");
     assertEquals("incorrect number of elements", 2, array.length());
-    assertTrue("both app states of ACCEPTED and KILLED are not present", 
+    assertTrue("both app states of ACCEPTED and KILLED are not present",
         (array.getJSONObject(0).getString("state").equals("ACCEPTED") &&
         array.getJSONObject(1).getString("state").equals("KILLED")) ||
         (array.getJSONObject(0).getString("state").equals("KILLED") &&
@@ -375,12 +375,12 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     assertEquals("incorrect number of elements", 1, apps.length());
     array = apps.getJSONArray("app");
     assertEquals("incorrect number of elements", 2, array.length());
-    assertTrue("both app states of ACCEPTED and KILLED are not present", 
+    assertTrue("both app states of ACCEPTED and KILLED are not present",
         (array.getJSONObject(0).getString("state").equals("ACCEPTED") &&
         array.getJSONObject(1).getString("state").equals("KILLED")) ||
         (array.getJSONObject(0).getString("state").equals("KILLED") &&
         array.getJSONObject(1).getString("state").equals("ACCEPTED")));
-    
+
     rm.stop();
   }
 
@@ -511,7 +511,8 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     WebResource r = resource();
 
     ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("apps").queryParam("finalStatus", FinalApplicationStatus.UNDEFINED.toString())
+        .path("apps").queryParam("finalStatus",
+                    FinalApplicationStatus.UNDEFINED.toString())
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
@@ -1804,7 +1805,8 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     int numAttempt = 1;
     while (true) {
       // fail the AM by sending CONTAINER_FINISHED event without registering.
-      amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
+      amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1,
+              ContainerState.COMPLETE);
       rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.FAILED);
       if (numAttempt == maxAppAttempts) {
         rm.waitForState(app1.getApplicationId(), RMAppState.FAILED);
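
The new TestRMWebServicesAppsCustomResourceTypes class below registers custom
resource types through CustomResourceTypesConfigurationProvider before starting
the MockRM. For comparison, a resource type can also be declared directly in
the configuration before ResourceUtils.resetResourceTypes(conf) is called; a
minimal sketch, where the resource name "resource1" and the unit "G" are
illustrative values only:

    // Declare one custom countable resource via configuration keys and
    // re-initialize the resource type registry from that configuration.
    Configuration conf = new Configuration();
    conf.set("yarn.resource-types", "resource1");
    conf.set("yarn.resource-types.resource1.units", "G");
    ResourceUtils.resetResourceTypes(conf);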

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
new file mode 100644
index 0000000..83e0056
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
@@ -0,0 +1,242 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler.CustomResourceTypesConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.AppInfoJsonVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.AppInfoXmlVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.BufferedClientResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.JsonCustomResourceTypeTestcase;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.ResourceRequestsJsonVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.ResourceRequestsXmlVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.Before;
+import org.junit.Test;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+import javax.ws.rs.core.MediaType;
+import java.util.ArrayList;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This test verifies that custom resource types are correctly serialized to XML
+ * and JSON when an HTTP GET request is sent to the resource ws/v1/cluster/apps.
+ */
+public class TestRMWebServicesAppsCustomResourceTypes extends JerseyTestBase {
+
+  private static MockRM rm;
+  private static final int CONTAINER_MB = 1024;
+
+  private static class WebServletModule extends ServletModule {
+    @Override
+    protected void configureServlets() {
+      bind(JAXBContextResolver.class);
+      bind(RMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      Configuration conf = new Configuration();
+      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
+          YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
+      conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
+          ResourceScheduler.class);
+      initResourceTypes(conf);
+      rm = new MockRM(conf);
+      bind(ResourceManager.class).toInstance(rm);
+      serve("/*").with(GuiceContainer.class);
+    }
+
+    private void initResourceTypes(Configuration conf) {
+      conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
+          CustomResourceTypesConfigurationProvider.class.getName());
+      ResourceUtils.resetResourceTypes(conf);
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    createInjectorForWebServletModule();
+  }
+
+  private void createInjectorForWebServletModule() {
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
+  }
+
+  public TestRMWebServicesAppsCustomResourceTypes() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
+            .contextListenerClass(GuiceServletConfig.class)
+            .filterClass(com.google.inject.servlet.GuiceFilter.class)
+            .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testRunningAppXml() throws Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
+    am1.allocate("*", 2048, 1, new ArrayList<>());
+    amNodeManager.nodeHeartbeat(true);
+
+    WebResource r = resource();
+    WebResource path = r.path("ws").path("v1").path("cluster").path("apps");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+    XmlCustomResourceTypeTestCase testCase =
+            new XmlCustomResourceTypeTestCase(path,
+                    new BufferedClientResponse(response));
+    testCase.verify(document -> {
+      NodeList apps = document.getElementsByTagName("apps");
+      assertEquals("incorrect number of apps elements", 1, apps.getLength());
+
+      NodeList appArray = ((Element)(apps.item(0)))
+              .getElementsByTagName("app");
+      assertEquals("incorrect number of app elements", 1, appArray.getLength());
+
+      verifyAppsXML(appArray, app1);
+    });
+
+    rm.stop();
+  }
+
+  @Test
+  public void testRunningAppJson() throws Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
+    am1.allocate("*", 2048, 1, new ArrayList<>());
+    amNodeManager.nodeHeartbeat(true);
+
+    WebResource r = resource();
+    WebResource path = r.path("ws").path("v1").path("cluster").path("apps");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+
+    JsonCustomResourceTypeTestcase testCase =
+        new JsonCustomResourceTypeTestcase(path,
+            new BufferedClientResponse(response));
+    testCase.verify(json -> {
+      try {
+        assertEquals("incorrect number of apps elements", 1, json.length());
+        JSONObject apps = json.getJSONObject("apps");
+        assertEquals("incorrect number of app elements", 1, apps.length());
+        JSONArray array = apps.getJSONArray("app");
+        assertEquals("incorrect count of app", 1, array.length());
+
+        verifyAppInfoJson(array.getJSONObject(0), app1);
+      } catch (JSONException e) {
+        throw new RuntimeException(e);
+      }
+    });
+
+    rm.stop();
+  }
+
+  private void verifyAppsXML(NodeList appArray, RMApp app) {
+    for (int i = 0; i < appArray.getLength(); i++) {
+      Element element = (Element) appArray.item(i);
+      AppInfoXmlVerifications.verify(element, app);
+
+      NodeList resourceRequests =
+          element.getElementsByTagName("resourceRequests");
+      assertEquals(1, resourceRequests.getLength());
+      Node resourceRequest = resourceRequests.item(0);
+      ResourceRequest rr =
+          ((AbstractYarnScheduler) rm.getRMContext().getScheduler())
+              .getApplicationAttempt(
+                  app.getCurrentAppAttempt().getAppAttemptId())
+              .getAppSchedulingInfo().getAllResourceRequests().get(0);
+      ResourceRequestsXmlVerifications.verifyWithCustomResourceTypes(
+              (Element) resourceRequest, rr,
+          CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+    }
+  }
+
+  private void verifyAppInfoJson(JSONObject info, RMApp app) throws
+          JSONException {
+    int expectedNumberOfElements = getExpectedNumberOfElements(app);
+
+    assertEquals("incorrect number of elements", expectedNumberOfElements,
+        info.length());
+
+    AppInfoJsonVerifications.verify(info, app);
+
+    JSONArray resourceRequests = info.getJSONArray("resourceRequests");
+    JSONObject requestInfo = resourceRequests.getJSONObject(0);
+    ResourceRequest rr =
+        ((AbstractYarnScheduler) rm.getRMContext().getScheduler())
+            .getApplicationAttempt(app.getCurrentAppAttempt().getAppAttemptId())
+            .getAppSchedulingInfo().getAllResourceRequests().get(0);
+
+    ResourceRequestsJsonVerifications.verifyWithCustomResourceTypes(
+            requestInfo, rr,
+            CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  private int getExpectedNumberOfElements(RMApp app) {
+    int expectedNumberOfElements = 40 + 2; // 2 -> resourceRequests
+    if (app.getApplicationSubmissionContext()
+        .getNodeLabelExpression() != null) {
+      expectedNumberOfElements++;
+    }
+
+    if (app.getAMResourceRequests().get(0).getNodeLabelExpression() != null) {
+      expectedNumberOfElements++;
+    }
+
+    if (AppInfo
+        .getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()) != null) {
+      expectedNumberOfElements++;
+    }
+    return expectedNumberOfElements;
+  }
+
+}
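
For illustration only (not part of the committed diff): a minimal, self-contained
sketch of hitting the same ws/v1/cluster/apps endpoint against a running
ResourceManager instead of the Jersey test harness. The host/port and the plain
HttpURLConnection client are assumptions, not anything this patch adds.

    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.util.Scanner;

    public class ClusterAppsProbe {
      public static void main(String[] args) throws Exception {
        // Assumed RM web address; 8088 is the usual default, adjust as needed.
        URL url = new URL("http://localhost:8088/ws/v1/cluster/apps");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        // Use "application/xml" to see the XML view verified by testRunningAppXml().
        conn.setRequestProperty("Accept", "application/json");
        try (Scanner sc = new Scanner(conn.getInputStream(), "UTF-8")
            .useDelimiter("\\A")) {
          // apps -> app[] -> resourceRequests carries the resourceInformation
          // entries, including any custom resource types registered with the RM.
          System.out.println(sc.hasNext() ? sc.next() : "");
        }
      }
    }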

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
index e37f76f..46d0a66 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
@@ -146,7 +146,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     config.setUserLimitFactor(B2, 100.0f);
     config.setCapacity(B3, 0.5f);
     config.setUserLimitFactor(B3, 100.0f);
-    
+
     config.setQueues(A1, new String[] {"a1a", "a1b"});
     final String A1A = A1 + ".a1a";
     config.setCapacity(A1A, 85);
@@ -254,7 +254,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     }
   }
 
-  public void verifySubQueueXML(Element qElem, String q, 
+  public void verifySubQueueXML(Element qElem, String q,
       float parentAbsCapacity, float parentAbsMaxCapacity)
       throws Exception {
     NodeList children = qElem.getChildNodes();
@@ -317,30 +317,34 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
 
   private void verifyClusterScheduler(JSONObject json) throws JSONException,
       Exception {
-    assertEquals("incorrect number of elements", 1, json.length());
+    assertEquals("incorrect number of elements in: " + json, 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
-    assertEquals("incorrect number of elements", 1, info.length());
+    assertEquals("incorrect number of elements in: " + info, 1, info.length());
     info = info.getJSONObject("schedulerInfo");
-    assertEquals("incorrect number of elements", 8, info.length());
+    assertEquals("incorrect number of elements in: " + info, 8, info.length());
     verifyClusterSchedulerGeneric(info.getString("type"),
         (float) info.getDouble("usedCapacity"),
         (float) info.getDouble("capacity"),
         (float) info.getDouble("maxCapacity"), info.getString("queueName"));
     JSONObject health = info.getJSONObject("health");
     assertNotNull(health);
-    assertEquals("incorrect number of elements", 3, health.length());
+    assertEquals("incorrect number of elements in: " + health, 3,
+        health.length());
     JSONArray operationsInfo = health.getJSONArray("operationsInfo");
-    assertEquals("incorrect number of elements", 4, operationsInfo.length());
+    assertEquals("incorrect number of elements in: " + health, 4,
+        operationsInfo.length());
     JSONArray lastRunDetails = health.getJSONArray("lastRunDetails");
-    assertEquals("incorrect number of elements", 3, lastRunDetails.length());
+    assertEquals("incorrect number of elements in: " + health, 3,
+        lastRunDetails.length());
 
     JSONArray arr = info.getJSONObject("queues").getJSONArray("queue");
-    assertEquals("incorrect number of elements", 2, arr.length());
+    assertEquals("incorrect number of elements in: " + arr, 2, arr.length());
 
     // test subqueues
     for (int i = 0; i < arr.length(); i++) {
       JSONObject obj = arr.getJSONObject(i);
-      String q = CapacitySchedulerConfiguration.ROOT + "." + obj.getString("queueName");
+      String q = CapacitySchedulerConfiguration.ROOT + "." +
+              obj.getString("queueName");
       verifySubQueue(obj, q, 100, 100);
     }
   }
@@ -355,7 +359,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     assertTrue("queueName doesn't match", "root".matches(queueName));
   }
 
-  private void verifySubQueue(JSONObject info, String q, 
+  private void verifySubQueue(JSONObject info, String q,
       float parentAbsCapacity, float parentAbsMaxCapacity)
       throws JSONException, Exception {
     int numExpectedElements = 20;
@@ -464,7 +468,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
         csConf.getUserLimitFactor(q), info.userLimitFactor, 1e-3f);
   }
 
-  //Return a child Node of node with the tagname or null if none exists 
+  //Return a child Node of node with the tagname or null if none exists
   private Node getChildNodeByName(Node node, String tagname) {
     NodeList nodeList = node.getChildNodes();
     for (int i=0; i < nodeList.getLength(); ++i) {
@@ -514,7 +518,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
           for (int j=0; j<users.getLength(); ++j) {
             Node user = users.item(j);
             String username = getChildNodeByName(user, "username")
-              .getTextContent(); 
+                .getTextContent();
             assertTrue(username.equals("user1") || username.equals("user2"));
             //Should be a parsable integer
             Integer.parseInt(getChildNodeByName(getChildNodeByName(user,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
index 3d28f12..99b5648 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -42,6 +42,8 @@ import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response.Status;
@@ -59,6 +61,8 @@ import static org.junit.Assert.assertNull;
  * Test scheduler configuration mutation via REST API.
  */
 public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
+  private static final Logger LOG = LoggerFactory
+          .getLogger(TestRMWebServicesConfigurationMutation.class);
 
   private static final File CONF_FILE = new File(new File("target",
       "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE);
@@ -396,6 +400,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
             .entity(YarnWebServiceUtils.toJson(updateInfo,
                 SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
+    LOG.debug("Response headers: " + response.getHeaders());
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
     CapacitySchedulerConfiguration newCSConf = cs.getConfiguration();
     assertEquals(0.2f, newCSConf

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
index e77785b..58c72ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -16,13 +16,14 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.resourcemanager.webapp;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import javax.ws.rs.core.MediaType;
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
 
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
@@ -30,6 +31,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager;
+
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
 import org.apache.hadoop.yarn.webapp.JerseyTestBase;
@@ -38,18 +42,18 @@ import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Before;
 import org.junit.Test;
+import javax.ws.rs.core.MediaType;
 
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.WebAppDescriptor;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
+/**
+ * Tests RM Webservices fair scheduler resources.
+ */
 public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   private static MockRM rm;
   private static YarnConfiguration conf;
-  
+
   private static class WebServletModule extends ServletModule {
     @Override
     protected void configureServlets() {
@@ -58,7 +62,7 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
       bind(GenericExceptionHandler.class);
       conf = new YarnConfiguration();
       conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
-        ResourceScheduler.class);
+          ResourceScheduler.class);
       rm = new MockRM(conf);
       bind(ResourceManager.class).toInstance(rm);
       serve("/*").with(GuiceContainer.class);
@@ -66,32 +70,32 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   }
 
   static {
-    GuiceServletConfig.setInjector(
-        Guice.createInjector(new WebServletModule()));
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
   }
 
   @Before
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    GuiceServletConfig.setInjector(
-        Guice.createInjector(new WebServletModule()));
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
   }
 
   public TestRMWebServicesFairScheduler() {
     super(new WebAppDescriptor.Builder(
         "org.apache.hadoop.yarn.server.resourcemanager.webapp")
-        .contextListenerClass(GuiceServletConfig.class)
-        .filterClass(com.google.inject.servlet.GuiceFilter.class)
-        .contextPath("jersey-guice-filter").servletPath("/").build());
+            .contextListenerClass(GuiceServletConfig.class)
+            .filterClass(com.google.inject.servlet.GuiceFilter.class)
+            .contextPath("jersey-guice-filter").servletPath("/").build());
   }
-  
+
   @Test
-  public void testClusterScheduler() throws JSONException, Exception {
+  public void testClusterScheduler() throws JSONException {
     WebResource r = resource();
-    ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("scheduler").accept(MediaType.APPLICATION_JSON)
-        .get(ClientResponse.class);
+    ClientResponse response =
+        r.path("ws").path("v1").path("cluster").path("scheduler")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
@@ -99,52 +103,51 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   }
 
   @Test
-  public void testClusterSchedulerSlash() throws JSONException, Exception {
+  public void testClusterSchedulerSlash() throws JSONException {
     WebResource r = resource();
-    ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("scheduler/").accept(MediaType.APPLICATION_JSON)
-        .get(ClientResponse.class);
+    ClientResponse response =
+        r.path("ws").path("v1").path("cluster").path("scheduler/")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
     verifyClusterScheduler(json);
   }
-  
+
   @Test
-  public void testClusterSchedulerWithSubQueues() throws JSONException,
-      Exception {
-    FairScheduler scheduler = (FairScheduler)rm.getResourceScheduler();
+  public void testClusterSchedulerWithSubQueues()
+      throws JSONException {
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
     QueueManager queueManager = scheduler.getQueueManager();
     // create LeafQueue
     queueManager.getLeafQueue("root.q.subqueue1", true);
     queueManager.getLeafQueue("root.q.subqueue2", true);
 
     WebResource r = resource();
-    ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("scheduler").accept(MediaType.APPLICATION_JSON)
-        .get(ClientResponse.class);
+    ClientResponse response =
+        r.path("ws").path("v1").path("cluster").path("scheduler")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
     JSONArray subQueueInfo = json.getJSONObject("scheduler")
         .getJSONObject("schedulerInfo").getJSONObject("rootQueue")
-        .getJSONObject("childQueues").getJSONArray("queue")
-        .getJSONObject(1).getJSONObject("childQueues").getJSONArray("queue");
+        .getJSONObject("childQueues").getJSONArray("queue").getJSONObject(1)
+        .getJSONObject("childQueues").getJSONArray("queue");
     // subQueueInfo consists of subqueue1 and subqueue2 info
     assertEquals(2, subQueueInfo.length());
 
     // Verify 'childQueues' field is omitted from FairSchedulerLeafQueueInfo.
     try {
       subQueueInfo.getJSONObject(1).getJSONObject("childQueues");
-      fail("FairSchedulerQueueInfo should omit field 'childQueues'" +
-           "if child queue is empty.");
+      fail("FairSchedulerQueueInfo should omit field 'childQueues'"
+          + "if child queue is empty.");
     } catch (JSONException je) {
       assertEquals("JSONObject[\"childQueues\"] not found.", je.getMessage());
     }
   }
 
-  private void verifyClusterScheduler(JSONObject json) throws JSONException,
-      Exception {
+  private void verifyClusterScheduler(JSONObject json) throws JSONException {
     assertEquals("incorrect number of elements", 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
     assertEquals("incorrect number of elements", 1, info.length());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
index 1e61186..40cf483 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
@@ -457,7 +457,7 @@ public class TestRMWebServicesSchedulerActivities
       if (object.getClass() == JSONObject.class) {
         assertEquals("Number of allocations is wrong", 1, realValue);
       } else if (object.getClass() == JSONArray.class) {
-        assertEquals("Number of allocations is wrong",
+        assertEquals("Number of allocations is wrong in: " + object,
             ((JSONArray) object).length(), realValue);
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
new file mode 100644
index 0000000..bb1fce0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.LocalConfigurationProvider;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static java.util.stream.Collectors.toList;
+
+/**
+ * This class can generate an XML configuration file of custom resource types.
+ * See createInitialResourceTypes for the default values. All custom resource
+ * types are prefixed with CUSTOM_RESOURCE_PREFIX. Please use the
+ * getConfigurationInputStream method to get an InputStream of the XML. If you
+ * want a different number of resources in your tests, please see the usages
+ * of this class in
+ * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}.
+ */
+public class CustomResourceTypesConfigurationProvider
+    extends LocalConfigurationProvider {
+
+  private static class CustomResourceTypes {
+    private int count;
+    private String xml;
+
+    CustomResourceTypes(String xml, int count) {
+      this.xml = xml;
+      this.count = count;
+    }
+
+    public int getCount() {
+      return count;
+    }
+
+    public String getXml() {
+      return xml;
+    }
+  }
+
+  private static final String CUSTOM_RESOURCE_PREFIX = "customResource-";
+
+  private static CustomResourceTypes customResourceTypes =
+      createInitialResourceTypes();
+
+  private static CustomResourceTypes createInitialResourceTypes() {
+    return createCustomResourceTypes(2);
+  }
+
+  private static CustomResourceTypes createCustomResourceTypes(int count) {
+    List<String> resourceTypeNames = generateResourceTypeNames(count);
+
+    List<String> resourceUnitXmlElements = IntStream.range(0, count)
+            .boxed()
+            .map(i -> getResourceUnitsXml(resourceTypeNames.get(i)))
+            .collect(toList());
+
+    StringBuilder sb = new StringBuilder("<configuration>\n");
+    sb.append(getResourceTypesXml(resourceTypeNames));
+
+    for (String resourceUnitXml : resourceUnitXmlElements) {
+      sb.append(resourceUnitXml);
+    }
+    sb.append("</configuration>");
+
+    return new CustomResourceTypes(sb.toString(), count);
+  }
+
+  private static List<String> generateResourceTypeNames(int count) {
+    return IntStream.range(0, count)
+            .boxed()
+            .map(i -> CUSTOM_RESOURCE_PREFIX + i)
+            .collect(toList());
+  }
+
+  private static String getResourceUnitsXml(String resource) {
+    return "<property>\n" + "<name>yarn.resource-types." + resource
+        + ".units</name>\n" + "<value>k</value>\n" + "</property>\n";
+  }
+
+  private static String getResourceTypesXml(List<String> resources) {
+    final String resourceTypes = makeCommaSeparatedString(resources);
+
+    return "<property>\n" + "<name>yarn.resource-types</name>\n" + "<value>"
+        + resourceTypes + "</value>\n" + "</property>\n";
+  }
+
+  private static String makeCommaSeparatedString(List<String> resources) {
+    return resources.stream().collect(Collectors.joining(","));
+  }
+
+  @Override
+  public InputStream getConfigurationInputStream(Configuration bootstrapConf,
+      String name) throws YarnException, IOException {
+    if (YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE.equals(name)) {
+      return new ByteArrayInputStream(
+          customResourceTypes.getXml().getBytes());
+    } else {
+      return super.getConfigurationInputStream(bootstrapConf, name);
+    }
+  }
+
+  public static void reset() {
+    customResourceTypes = createInitialResourceTypes();
+  }
+
+  public static void setNumberOfResourceTypes(int count) {
+    customResourceTypes = createCustomResourceTypes(count);
+  }
+
+  public static List<String> getCustomResourceTypes() {
+    return generateResourceTypeNames(customResourceTypes.getCount());
+  }
+}
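
As a rough sketch (again, not part of the committed diff), the configuration this
provider serves for the resource-types file can be dumped as shown below. With the
default two types, the output is a <configuration> element containing a
yarn.resource-types property listing customResource-0,customResource-1 plus one
yarn.resource-types.<name>.units property per type, each with the value "k".

    import java.io.InputStream;
    import java.util.Scanner;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler.CustomResourceTypesConfigurationProvider;

    public class ResourceTypesXmlDump {
      public static void main(String[] args) throws Exception {
        CustomResourceTypesConfigurationProvider provider =
            new CustomResourceTypesConfigurationProvider();
        try (InputStream xml = provider.getConfigurationInputStream(
                new Configuration(),
                YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE);
            Scanner sc = new Scanner(xml, "UTF-8").useDelimiter("\\A")) {
          // Prints the generated resource-types XML described above.
          System.out.println(sc.hasNext() ? sc.next() : "");
        }
      }
    }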

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
new file mode 100644
index 0000000..924411a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This test helper class is primarily used by
+ * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}.
+ */
+public class FairSchedulerJsonVerifications {
+
+  private static final Set<String> RESOURCE_FIELDS =
+      Sets.newHashSet("minResources", "amUsedResources", "amMaxResources",
+          "fairResources", "clusterResources", "reservedResources",
+              "maxResources", "usedResources", "steadyFairResources",
+              "demandResources");
+  private final Set<String> customResourceTypes;
+
+  FairSchedulerJsonVerifications(List<String> customResourceTypes) {
+    this.customResourceTypes = Sets.newHashSet(customResourceTypes);
+  }
+
+  public void verify(JSONObject jsonObject) {
+    try {
+      verifyResourcesContainDefaultResourceTypes(jsonObject, RESOURCE_FIELDS);
+      verifyResourcesContainCustomResourceTypes(jsonObject, RESOURCE_FIELDS);
+    } catch (JSONException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void verifyResourcesContainDefaultResourceTypes(JSONObject queue,
+      Set<String> resourceCategories) throws JSONException {
+    for (String resourceCategory : resourceCategories) {
+      boolean hasResourceCategory = queue.has(resourceCategory);
+      assertTrue("Queue " + queue + " does not have resource category key: "
+          + resourceCategory, hasResourceCategory);
+      verifyResourceContainsDefaultResourceTypes(
+          queue.getJSONObject(resourceCategory));
+    }
+  }
+
+  private void verifyResourceContainsDefaultResourceTypes(
+      JSONObject jsonObject) {
+    Object memory = jsonObject.opt("memory");
+    Object vCores = jsonObject.opt("vCores");
+
+    assertNotNull("Key 'memory' not found in: " + jsonObject, memory);
+    assertNotNull("Key 'vCores' not found in: " + jsonObject, vCores);
+  }
+
+  private void verifyResourcesContainCustomResourceTypes(JSONObject queue,
+      Set<String> resourceCategories) throws JSONException {
+    for (String resourceCategory : resourceCategories) {
+      assertTrue("Queue " + queue + " does not have resource category key: "
+          + resourceCategory, queue.has(resourceCategory));
+      verifyResourceContainsAllCustomResourceTypes(
+          queue.getJSONObject(resourceCategory));
+    }
+  }
+
+  private void verifyResourceContainsAllCustomResourceTypes(
+      JSONObject resourceCategory) throws JSONException {
+    assertTrue("resourceCategory does not have resourceInformations: "
+        + resourceCategory, resourceCategory.has("resourceInformations"));
+
+    JSONObject resourceInformations =
+        resourceCategory.getJSONObject("resourceInformations");
+    assertTrue(
+        "resourceInformations does not have resourceInformation object: "
+            + resourceInformations,
+        resourceInformations.has("resourceInformation"));
+    JSONArray customResources =
+        resourceInformations.getJSONArray("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        customResourceTypes.size(), customResources.length() - 2);
+
+    for (int i = 0; i < customResources.length(); i++) {
+      JSONObject customResource = customResources.getJSONObject(i);
+      assertTrue("Resource type does not have name field: " + customResource,
+          customResource.has("name"));
+      assertTrue("Resource type does not have name resourceType field: "
+          + customResource, customResource.has("resourceType"));
+      assertTrue(
+          "Resource type does not have name units field: " + customResource,
+          customResource.has("units"));
+      assertTrue(
+          "Resource type does not have name value field: " + customResource,
+          customResource.has("value"));
+
+      String name = customResource.getString("name");
+      String unit = customResource.getString("units");
+      String resourceType = customResource.getString("resourceType");
+      Long value = customResource.getLong("value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          customResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Custom resource value " + value + " is null!", value);
+    }
+  }
+}
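
As a usage sketch only (mirroring verifyJsonResponse() in
TestRMWebServicesFairSchedulerCustomResourceTypes further below): the caller digs
the first child queue out of the scheduler JSON and hands it to verify() together
with the expected custom resource type names.

    package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;

    import java.util.List;
    import org.codehaus.jettison.json.JSONException;
    import org.codehaus.jettison.json.JSONObject;

    final class FairSchedulerJsonVerificationsUsage {
      /** json is the parsed body of GET ws/v1/cluster/scheduler. */
      static void verifyFirstChildQueue(JSONObject json, List<String> customTypes)
          throws JSONException {
        JSONObject firstChildQueue = json.getJSONObject("scheduler")
            .getJSONObject("schedulerInfo").getJSONObject("rootQueue")
            .getJSONObject("childQueues").getJSONArray("queue").getJSONObject(0);
        new FairSchedulerJsonVerifications(customTypes).verify(firstChildQueue);
      }
    }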

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
new file mode 100644
index 0000000..63ae7b7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase.toXml;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This test helper class is primarily used by
+ * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}.
+ */
+public class FairSchedulerXmlVerifications {
+
+  private static final Set<String> RESOURCE_FIELDS = Sets.newHashSet(
+      "minResources", "amUsedResources", "amMaxResources", "fairResources",
+      "clusterResources", "reservedResources", "maxResources", "usedResources",
+      "steadyFairResources", "demandResources");
+  private final Set<String> customResourceTypes;
+
+  FairSchedulerXmlVerifications(List<String> customResourceTypes) {
+    this.customResourceTypes = Sets.newHashSet(customResourceTypes);
+  }
+
+  public void verify(Element element) {
+    verifyResourcesContainDefaultResourceTypes(element, RESOURCE_FIELDS);
+    verifyResourcesContainCustomResourceTypes(element, RESOURCE_FIELDS);
+  }
+
+  private void verifyResourcesContainDefaultResourceTypes(Element queue,
+      Set<String> resourceCategories) {
+    for (String resourceCategory : resourceCategories) {
+      boolean hasResourceCategory = hasChild(queue, resourceCategory);
+      assertTrue("Queue " + queue + " does not have resource category key: "
+          + resourceCategory, hasResourceCategory);
+      verifyResourceContainsDefaultResourceTypes(
+              (Element) queue.getElementsByTagName(resourceCategory).item(0));
+    }
+  }
+
+  private void verifyResourceContainsDefaultResourceTypes(
+      Element element) {
+    Object memory = opt(element, "memory");
+    Object vCores = opt(element, "vCores");
+
+    assertNotNull("Key 'memory' not found in: " + element, memory);
+    assertNotNull("Key 'vCores' not found in: " + element, vCores);
+  }
+
+  private void verifyResourcesContainCustomResourceTypes(Element queue,
+      Set<String> resourceCategories) {
+    for (String resourceCategory : resourceCategories) {
+      assertTrue("Queue " + queue + " does not have key for resourceCategory: "
+          + resourceCategory, hasChild(queue, resourceCategory));
+      verifyResourceContainsCustomResourceTypes(
+              (Element) queue.getElementsByTagName(resourceCategory).item(0));
+    }
+  }
+
+  private void verifyResourceContainsCustomResourceTypes(
+      Element resourceCategory) {
+    assertEquals(
+        toXml(resourceCategory)
+            + " should have only one resourceInformations child!",
+        1, resourceCategory.getElementsByTagName("resourceInformations")
+            .getLength());
+    Element resourceInformations = (Element) resourceCategory
+        .getElementsByTagName("resourceInformations").item(0);
+
+    NodeList customResources =
+        resourceInformations.getElementsByTagName("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        customResourceTypes.size(), customResources.getLength() - 2);
+
+    for (int i = 0; i < customResources.getLength(); i++) {
+      Element customResource = (Element) customResources.item(i);
+      String name = getXmlString(customResource, "name");
+      String unit = getXmlString(customResource, "units");
+      String resourceType = getXmlString(customResource, "resourceType");
+      Long value = getXmlLong(customResource, "value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          customResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Resource value should not be null for resource type "
+          + resourceType + ", listing xml contents: " + toXml(customResource),
+          value);
+    }
+  }
+
+  private Object opt(Node node, String child) {
+    NodeList nodes = getElementsByTagNameInternal(node, child);
+    if (nodes.getLength() > 0) {
+      return nodes.item(0);
+    }
+
+    return null;
+  }
+
+  private boolean hasChild(Node node, String child) {
+    return getElementsByTagNameInternal(node, child).getLength() > 0;
+  }
+
+  private NodeList getElementsByTagNameInternal(Node node, String child) {
+    if (node instanceof Element) {
+      return ((Element) node).getElementsByTagName(child);
+    } else if (node instanceof Document) {
+      return ((Document) node).getElementsByTagName(child);
+    } else {
+      throw new IllegalStateException("Unknown type of wrappedObject: " + node
+          + ", type: " + node.getClass());
+    }
+  }
+}
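
The XML counterpart is driven the same way (compare verifyXmlResponse() in the
test class that follows): navigate scheduler -> schedulerInfo -> rootQueue ->
childQueues -> queue and pass the queue element to verify(). A brief sketch of
that calling convention:

    package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;

    import java.util.List;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;

    final class FairSchedulerXmlVerificationsUsage {
      /** xml is the parsed body of GET ws/v1/cluster/scheduler (application/xml). */
      static void verifyFirstChildQueue(Document xml, List<String> customTypes) {
        Element scheduler =
            (Element) xml.getElementsByTagName("scheduler").item(0);
        Element schedulerInfo =
            (Element) scheduler.getElementsByTagName("schedulerInfo").item(0);
        Element rootQueue =
            (Element) schedulerInfo.getElementsByTagName("rootQueue").item(0);
        Element childQueues =
            (Element) rootQueue.getElementsByTagName("childQueues").item(0);
        Element queue =
            (Element) childQueues.getElementsByTagName("queue").item(0);
        new FairSchedulerXmlVerifications(customTypes).verify(queue);
      }
    }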

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
new file mode 100644
index 0000000..de4d5a1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.BufferedClientResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.JsonCustomResourceTypeTestcase;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.w3c.dom.Element;
+import javax.ws.rs.core.MediaType;
+import java.lang.reflect.Method;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This class tests response representations of queue resources when custom
+ * resource types are set explicitly, with the help of
+ * {@link CustomResourceTypesConfigurationProvider}.
+ */
+public class TestRMWebServicesFairSchedulerCustomResourceTypes
+    extends JerseyTestBase {
+  private static MockRM rm;
+  private static YarnConfiguration conf;
+
+  private static class WebServletModule extends ServletModule {
+    @Override
+    protected void configureServlets() {
+      bind(JAXBContextResolver.class);
+      bind(RMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      conf = new YarnConfiguration();
+      conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
+          ResourceScheduler.class);
+      initResourceTypes(conf);
+      rm = new MockRM(conf);
+      bind(ResourceManager.class).toInstance(rm);
+      serve("/*").with(GuiceContainer.class);
+    }
+
+    private void initResourceTypes(YarnConfiguration conf) {
+      conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
+          CustomResourceTypesConfigurationProvider.class.getName());
+      ResourceUtils.resetResourceTypes(conf);
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    createInjectorForWebServletModule();
+  }
+
+  @After
+  public void tearDown() {
+    ResourceUtils.resetResourceTypes(new Configuration());
+    CustomResourceTypesConfigurationProvider.reset();
+  }
+
+  private void createInjectorForWebServletModule() {
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
+  }
+
+  public TestRMWebServicesFairSchedulerCustomResourceTypes() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
+            .contextListenerClass(GuiceServletConfig.class)
+            .filterClass(com.google.inject.servlet.GuiceFilter.class)
+            .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testClusterSchedulerWithCustomResourceTypesJson() {
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+
+    verifyJsonResponse(path, response,
+            CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  @Test
+  public void testClusterSchedulerWithCustomResourceTypesXml() {
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+    verifyXmlResponse(path, response,
+        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  @Test
+  public void testClusterSchedulerWithElevenCustomResourceTypesXml() {
+    CustomResourceTypesConfigurationProvider.setNumberOfResourceTypes(11);
+    createInjectorForWebServletModule();
+
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+    verifyXmlResponse(path, response,
+        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  @Test
+  public void testClusterSchedulerElevenWithCustomResourceTypesJson() {
+    CustomResourceTypesConfigurationProvider.setNumberOfResourceTypes(11);
+    createInjectorForWebServletModule();
+
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+
+    verifyJsonResponse(path, response,
+        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  private void verifyJsonResponse(WebResource path, ClientResponse response,
+      List<String> customResourceTypes) {
+    JsonCustomResourceTypeTestcase testCase =
+        new JsonCustomResourceTypeTestcase(path,
+            new BufferedClientResponse(response));
+    testCase.verify(json -> {
+      try {
+        JSONArray queues = json.getJSONObject("scheduler")
+            .getJSONObject("schedulerInfo").getJSONObject("rootQueue")
+            .getJSONObject("childQueues").getJSONArray("queue");
+
+        // childQueueInfo consists of subqueue1 and subqueue2 info
+        assertEquals(2, queues.length());
+        JSONObject firstChildQueue = queues.getJSONObject(0);
+        new FairSchedulerJsonVerifications(customResourceTypes)
+            .verify(firstChildQueue);
+      } catch (JSONException e) {
+        throw new RuntimeException(e);
+      }
+    });
+  }
+
+  private void verifyXmlResponse(WebResource path, ClientResponse response,
+          List<String> customResourceTypes) {
+    XmlCustomResourceTypeTestCase testCase = new XmlCustomResourceTypeTestCase(
+        path, new BufferedClientResponse(response));
+
+    testCase.verify(xml -> {
+      Element scheduler =
+          (Element) xml.getElementsByTagName("scheduler").item(0);
+      Element schedulerInfo =
+          (Element) scheduler.getElementsByTagName("schedulerInfo").item(0);
+      Element rootQueue =
+          (Element) schedulerInfo.getElementsByTagName("rootQueue").item(0);
+
+      Element childQueues =
+          (Element) rootQueue.getElementsByTagName("childQueues").item(0);
+      Element queue =
+          (Element) childQueues.getElementsByTagName("queue").item(0);
+      new FairSchedulerXmlVerifications(customResourceTypes).verify(queue);
+    });
+  }
+
+  private void incrementUsedResourcesOnQueue(final FSLeafQueue queue,
+      final long value) {
+    try {
+      Method incUsedResourceMethod = queue.getClass().getSuperclass()
+          .getDeclaredMethod("incUsedResource", Resource.class);
+      incUsedResourceMethod.setAccessible(true);
+
+      Map<String, Long> customResources =
+          CustomResourceTypesConfigurationProvider.getCustomResourceTypes()
+              .stream()
+              .collect(Collectors.toMap(Function.identity(), v -> value));
+
+      incUsedResourceMethod.invoke(queue,
+          Resource.newInstance(20, 30, customResources));
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+}
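
Outside of this test (which wires the types in through
CustomResourceTypesConfigurationProvider), the same two resource types could be
registered directly on a configuration before resetting ResourceUtils; the
property names below mirror the XML the provider emits, and the snippet is an
illustrative sketch rather than part of the patch.

    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.util.resource.ResourceUtils;

    public class RegisterCustomResourceTypes {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        conf.set("yarn.resource-types", "customResource-0,customResource-1");
        conf.set("yarn.resource-types.customResource-0.units", "k");
        conf.set("yarn.resource-types.customResource-1.units", "k");
        // Re-initializes the known resource types from the given configuration.
        ResourceUtils.resetResourceTypes(conf);
      }
    }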

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
new file mode 100644
index 0000000..4ab1443
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringEqual;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringMatch;
+import static org.junit.Assert.*;
+
+/**
+ * Contains all value verifications that are needed to verify {@link AppInfo}
+ * JSON objects.
+ */
+public final class AppInfoJsonVerifications {
+
+  private AppInfoJsonVerifications() {
+    //utility class
+  }
+
+  /**
+   * Tests whether the {@link AppInfo} JSON representation contains the
+   * required values, as defined by the specified app parameter.
+   * @param  info the JSON representation of the application to verify
+   * @param  app  an RMApp instance that contains the expected values
+   *              to test against.
+   */
+  public static void verify(JSONObject info, RMApp app) throws JSONException {
+    checkStringMatch("id", app.getApplicationId().toString(),
+        info.getString("id"));
+    checkStringMatch("user", app.getUser(), info.getString("user"));
+    checkStringMatch("name", app.getName(), info.getString("name"));
+    checkStringMatch("applicationType", app.getApplicationType(),
+        info.getString("applicationType"));
+    checkStringMatch("queue", app.getQueue(), info.getString("queue"));
+    assertEquals("priority doesn't match", 0, info.getInt("priority"));
+    checkStringMatch("state", app.getState().toString(),
+        info.getString("state"));
+    checkStringMatch("finalStatus", app.getFinalApplicationStatus().toString(),
+        info.getString("finalStatus"));
+    assertEquals("progress doesn't match", 0,
+        (float) info.getDouble("progress"), 0.0);
+    if ("UNASSIGNED".equals(info.getString("trackingUI"))) {
+      checkStringMatch("trackingUI", "UNASSIGNED",
+          info.getString("trackingUI"));
+    }
+    checkStringEqual("diagnostics", app.getDiagnostics().toString(),
+        info.getString("diagnostics"));
+    assertEquals("clusterId doesn't match",
+        ResourceManager.getClusterTimeStamp(), info.getLong("clusterId"));
+    assertEquals("startedTime doesn't match", app.getStartTime(),
+        info.getLong("startedTime"));
+    assertEquals("finishedTime doesn't match", app.getFinishTime(),
+        info.getLong("finishedTime"));
+    assertTrue("elapsed time not greater than 0",
+        info.getLong("elapsedTime") > 0);
+    checkStringMatch("amHostHttpAddress",
+        app.getCurrentAppAttempt().getMasterContainer().getNodeHttpAddress(),
+        info.getString("amHostHttpAddress"));
+    assertTrue("amContainerLogs doesn't match",
+        info.getString("amContainerLogs").startsWith("http://"));
+    assertTrue("amContainerLogs doesn't contain user info",
+        info.getString("amContainerLogs").endsWith("/" + app.getUser()));
+    assertEquals("allocatedMB doesn't match", 1024, info.getInt("allocatedMB"));
+    assertEquals("allocatedVCores doesn't match", 1,
+        info.getInt("allocatedVCores"));
+    assertEquals("queueUsagePerc doesn't match", 50.0f,
+        (float) info.getDouble("queueUsagePercentage"), 0.01f);
+    assertEquals("clusterUsagePerc doesn't match", 50.0f,
+        (float) info.getDouble("clusterUsagePercentage"), 0.01f);
+    assertEquals("numContainers doesn't match", 1,
+        info.getInt("runningContainers"));
+    assertNotNull("preemptedResourceSecondsMap should not be null",
+        info.getJSONObject("preemptedResourceSecondsMap"));
+    assertEquals("preemptedResourceMB doesn't match",
+        app.getRMAppMetrics().getResourcePreempted().getMemorySize(),
+        info.getInt("preemptedResourceMB"));
+    assertEquals("preemptedResourceVCores doesn't match",
+        app.getRMAppMetrics().getResourcePreempted().getVirtualCores(),
+        info.getInt("preemptedResourceVCores"));
+    assertEquals("numNonAMContainerPreempted doesn't match",
+        app.getRMAppMetrics().getNumNonAMContainersPreempted(),
+        info.getInt("numNonAMContainerPreempted"));
+    assertEquals("numAMContainerPreempted doesn't match",
+        app.getRMAppMetrics().getNumAMContainersPreempted(),
+        info.getInt("numAMContainerPreempted"));
+    assertEquals("Log aggregation Status doesn't match",
+        app.getLogAggregationStatusForAppReport().toString(),
+        info.getString("logAggregationStatus"));
+    assertEquals("unmanagedApplication doesn't match",
+        app.getApplicationSubmissionContext().getUnmanagedAM(),
+        info.getBoolean("unmanagedApplication"));
+
+    if (app.getApplicationSubmissionContext()
+        .getNodeLabelExpression() != null) {
+      assertEquals("appNodeLabelExpression doesn't match",
+          app.getApplicationSubmissionContext().getNodeLabelExpression(),
+          info.getString("appNodeLabelExpression"));
+    }
+    assertEquals("amNodeLabelExpression doesn't match",
+        app.getAMResourceRequests().get(0).getNodeLabelExpression(),
+        info.getString("amNodeLabelExpression"));
+    assertEquals("amRPCAddress",
+        AppInfo.getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()),
+        info.getString("amRPCAddress"));
+  }
+}
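
For orientation, here is a minimal sketch of how a web-services test might call the helper added above; the way the app JSON object and the RMApp fixture are obtained is assumed and not part of this change:

```java
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.codehaus.jettison.json.JSONObject;

public final class AppInfoJsonVerificationsUsageSketch {

  private AppInfoJsonVerificationsUsageSketch() {
  }

  /**
   * Both parameters are assumed to be produced by the surrounding test:
   * the JSON object from the RM "apps" REST response, the RMApp from the
   * application submitted by the test.
   */
  static void check(JSONObject appJson, RMApp submittedApp) throws Exception {
    // Asserts id, user, queue, resource usage and the other AppInfo fields.
    AppInfoJsonVerifications.verify(appJson, submittedApp);
  }
}
```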




[22/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java
deleted file mode 100644
index fdc3ce7..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo.OzoneAclType;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo.OzoneAclRights;
-
-/**
- * Utilities for converting protobuf classes.
- */
-public final class KSMPBHelper {
-
-  private KSMPBHelper() {
-    /** Hidden constructor */
-  }
-
-  /**
-   * Converts OzoneAcl into protobuf's OzoneAclInfo.
-   * @return OzoneAclInfo
-   */
-  public static OzoneAclInfo convertOzoneAcl(OzoneAcl acl) {
-    OzoneAclInfo.OzoneAclType aclType;
-    switch(acl.getType()) {
-    case USER:
-      aclType = OzoneAclType.USER;
-      break;
-    case GROUP:
-      aclType = OzoneAclType.GROUP;
-      break;
-    case WORLD:
-      aclType = OzoneAclType.WORLD;
-      break;
-    default:
-      throw new IllegalArgumentException("ACL type is not recognized");
-    }
-    OzoneAclInfo.OzoneAclRights aclRights;
-    switch(acl.getRights()) {
-    case READ:
-      aclRights = OzoneAclRights.READ;
-      break;
-    case WRITE:
-      aclRights = OzoneAclRights.WRITE;
-      break;
-    case READ_WRITE:
-      aclRights = OzoneAclRights.READ_WRITE;
-      break;
-    default:
-      throw new IllegalArgumentException("ACL right is not recognized");
-    }
-
-    return OzoneAclInfo.newBuilder().setType(aclType)
-        .setName(acl.getName())
-        .setRights(aclRights)
-        .build();
-  }
-
-  /**
-   * Converts protobuf's OzoneAclInfo into OzoneAcl.
-   * @return OzoneAcl
-   */
-  public static OzoneAcl convertOzoneAcl(OzoneAclInfo aclInfo) {
-    OzoneAcl.OzoneACLType aclType;
-    switch(aclInfo.getType()) {
-    case USER:
-      aclType = OzoneAcl.OzoneACLType.USER;
-      break;
-    case GROUP:
-      aclType = OzoneAcl.OzoneACLType.GROUP;
-      break;
-    case WORLD:
-      aclType = OzoneAcl.OzoneACLType.WORLD;
-      break;
-    default:
-      throw new IllegalArgumentException("ACL type is not recognized");
-    }
-    OzoneAcl.OzoneACLRights aclRights;
-    switch(aclInfo.getRights()) {
-    case READ:
-      aclRights = OzoneAcl.OzoneACLRights.READ;
-      break;
-    case WRITE:
-      aclRights = OzoneAcl.OzoneACLRights.WRITE;
-      break;
-    case READ_WRITE:
-      aclRights = OzoneAcl.OzoneACLRights.READ_WRITE;
-      break;
-    default:
-      throw new IllegalArgumentException("ACL right is not recognized");
-    }
-
-    return new OzoneAcl(aclType, aclInfo.getName(), aclRights);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
new file mode 100644
index 0000000..d57d32e
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.protocolPB;
+
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights;
+
+/**
+ * Utilities for converting protobuf classes.
+ */
+public final class OMPBHelper {
+
+  private OMPBHelper() {
+    /** Hidden constructor */
+  }
+
+  /**
+   * Converts OzoneAcl into protobuf's OzoneAclInfo.
+   * @return OzoneAclInfo
+   */
+  public static OzoneAclInfo convertOzoneAcl(OzoneAcl acl) {
+    OzoneAclInfo.OzoneAclType aclType;
+    switch(acl.getType()) {
+    case USER:
+      aclType = OzoneAclType.USER;
+      break;
+    case GROUP:
+      aclType = OzoneAclType.GROUP;
+      break;
+    case WORLD:
+      aclType = OzoneAclType.WORLD;
+      break;
+    default:
+      throw new IllegalArgumentException("ACL type is not recognized");
+    }
+    OzoneAclInfo.OzoneAclRights aclRights;
+    switch(acl.getRights()) {
+    case READ:
+      aclRights = OzoneAclRights.READ;
+      break;
+    case WRITE:
+      aclRights = OzoneAclRights.WRITE;
+      break;
+    case READ_WRITE:
+      aclRights = OzoneAclRights.READ_WRITE;
+      break;
+    default:
+      throw new IllegalArgumentException("ACL right is not recognized");
+    }
+
+    return OzoneAclInfo.newBuilder().setType(aclType)
+        .setName(acl.getName())
+        .setRights(aclRights)
+        .build();
+  }
+
+  /**
+   * Converts protobuf's OzoneAclInfo into OzoneAcl.
+   * @return OzoneAcl
+   */
+  public static OzoneAcl convertOzoneAcl(OzoneAclInfo aclInfo) {
+    OzoneAcl.OzoneACLType aclType;
+    switch(aclInfo.getType()) {
+    case USER:
+      aclType = OzoneAcl.OzoneACLType.USER;
+      break;
+    case GROUP:
+      aclType = OzoneAcl.OzoneACLType.GROUP;
+      break;
+    case WORLD:
+      aclType = OzoneAcl.OzoneACLType.WORLD;
+      break;
+    default:
+      throw new IllegalArgumentException("ACL type is not recognized");
+    }
+    OzoneAcl.OzoneACLRights aclRights;
+    switch(aclInfo.getRights()) {
+    case READ:
+      aclRights = OzoneAcl.OzoneACLRights.READ;
+      break;
+    case WRITE:
+      aclRights = OzoneAcl.OzoneACLRights.WRITE;
+      break;
+    case READ_WRITE:
+      aclRights = OzoneAcl.OzoneACLRights.READ_WRITE;
+      break;
+    default:
+      throw new IllegalArgumentException("ACL right is not recognized");
+    }
+
+    return new OzoneAcl(aclType, aclInfo.getName(), aclRights);
+  }
+}
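
A minimal round-trip sketch of the helper above; the ACL values are illustrative, and nothing beyond the constructor and accessors used in OMPBHelper itself is assumed:

```java
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;

public final class OMPBHelperRoundTripSketch {

  public static void main(String[] args) {
    // Build an ACL with the three-argument constructor used by OMPBHelper.
    OzoneAcl acl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "alice",
        OzoneAcl.OzoneACLRights.READ_WRITE);

    // Java object -> protobuf message.
    OzoneAclInfo proto = OMPBHelper.convertOzoneAcl(acl);

    // Protobuf message -> Java object; type, name and rights survive the trip.
    OzoneAcl back = OMPBHelper.convertOzoneAcl(proto);
    System.out.println(back.getName() + " -> " + back.getRights());
  }
}
```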

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto
deleted file mode 100644
index d3d1de6..0000000
--- a/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto
+++ /dev/null
@@ -1,474 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *unstable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.ozone.protocol.proto";
-option java_outer_classname = "KeySpaceManagerProtocolProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.ozone;
-
-/**
-This is file contains the protocol to communicate with
-Ozone key space manager. Ozone KSM manages the namespace for ozone.
-This is similar to Namenode for Ozone.
-*/
-
-import "hdfs.proto";
-import "hdds.proto";
-
-enum Status {
-    OK = 1;
-    VOLUME_NOT_UNIQUE = 2;
-    VOLUME_NOT_FOUND = 3;
-    VOLUME_NOT_EMPTY = 4;
-    VOLUME_ALREADY_EXISTS = 5;
-    USER_NOT_FOUND = 6;
-    USER_TOO_MANY_VOLUMES = 7;
-    BUCKET_NOT_FOUND = 8;
-    BUCKET_NOT_EMPTY = 9;
-    BUCKET_ALREADY_EXISTS = 10;
-    KEY_ALREADY_EXISTS = 11;
-    KEY_NOT_FOUND = 12;
-    INVALID_KEY_NAME = 13;
-    ACCESS_DENIED = 14;
-    INTERNAL_ERROR = 15;
-}
-
-
-message VolumeInfo {
-    required string adminName = 1;
-    required string ownerName = 2;
-    required string volume = 3;
-    optional uint64 quotaInBytes = 4;
-    repeated hadoop.hdds.KeyValue metadata = 5;
-    repeated OzoneAclInfo volumeAcls = 6;
-    required uint64 creationTime = 7;
-}
-
-/**
-    Creates a volume
-*/
-message CreateVolumeRequest {
-    required VolumeInfo volumeInfo = 1;
-}
-
-message CreateVolumeResponse {
-
-    required Status status = 1;
-}
-
-message VolumeList {
-    repeated string volumeNames = 1;
-}
-
-/**
-    Changes the Volume Properties -- like ownership and quota for a volume.
-*/
-message SetVolumePropertyRequest {
-    required string volumeName = 1;
-    optional string ownerName = 2;
-    optional uint64 quotaInBytes = 3;
-}
-
-message SetVolumePropertyResponse {
-    required Status status = 1;
-}
-
-/**
- * Checks if the user has specified permissions for the volume
- */
-message CheckVolumeAccessRequest {
-    required string volumeName = 1;
-    required OzoneAclInfo userAcl = 2;
-}
-
-message CheckVolumeAccessResponse {
-
-    required Status status = 1;
-}
-
-
-/**
-    Returns information about a volume.
-*/
-
-message InfoVolumeRequest {
-    required string volumeName = 1;
-}
-
-message InfoVolumeResponse {
-    required Status status = 1;
-    optional VolumeInfo volumeInfo = 2;
-
-}
-
-/**
-    Deletes an existing volume.
-*/
-message DeleteVolumeRequest {
-    required string volumeName = 1;
-}
-
-message DeleteVolumeResponse {
-    required Status status = 1;
-}
-
-
-/**
-    List Volumes -- List all volumes in the cluster or by user.
-*/
-
-message ListVolumeRequest {
-    enum Scope {
-        USER_VOLUMES = 1;   // User volumes -- called by user
-        VOLUMES_BY_USER = 2; // User volumes - called by Admin
-        VOLUMES_BY_CLUSTER = 3; // All volumes in the cluster
-    }
-    required Scope scope = 1;
-    optional string userName = 2;
-    optional string prefix = 3;
-    optional string prevKey = 4;
-    optional uint32 maxKeys = 5;
-}
-
-message ListVolumeResponse {
-    required Status status = 1;
-    repeated VolumeInfo volumeInfo = 2;
-}
-
-message BucketInfo {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    repeated OzoneAclInfo acls = 3;
-    required bool isVersionEnabled = 4 [default = false];
-    required hadoop.hdfs.StorageTypeProto storageType = 5 [default = DISK];
-    required uint64 creationTime = 6;
-}
-
-message BucketArgs {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    repeated OzoneAclInfo addAcls = 3;
-    repeated OzoneAclInfo removeAcls = 4;
-    optional bool isVersionEnabled = 5;
-    optional hadoop.hdfs.StorageTypeProto storageType = 6;
-}
-
-message OzoneAclInfo {
-    enum OzoneAclType {
-        USER = 1;
-        GROUP = 2;
-        WORLD = 3;
-    }
-    enum OzoneAclRights {
-        READ = 1;
-        WRITE = 2;
-        READ_WRITE = 3;
-    }
-    required OzoneAclType type = 1;
-    required string name = 2;
-    required OzoneAclRights rights = 3;
-}
-
-message CreateBucketRequest {
-    required BucketInfo bucketInfo = 1;
-}
-
-message CreateBucketResponse {
-    required Status status = 1;
-}
-
-message InfoBucketRequest {
-    required string volumeName = 1;
-    required string bucketName = 2;
-}
-
-message InfoBucketResponse {
-    required Status status = 1;
-    optional BucketInfo bucketInfo = 2;
-}
-
-message ListBucketsRequest {
-    required string volumeName = 1;
-    optional string startKey = 2;
-    optional string prefix = 3;
-    optional int32 count = 4;
-}
-
-message ListBucketsResponse {
-    required Status status = 1;
-    repeated BucketInfo bucketInfo = 2;
-}
-
-message KeyArgs {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    required string keyName = 3;
-    optional uint64 dataSize = 4;
-    optional hadoop.hdds.ReplicationType type = 5;
-    optional hadoop.hdds.ReplicationFactor factor = 6;
-}
-
-message KeyLocation {
-    required hadoop.hdds.BlockID blockID = 1;
-    required bool shouldCreateContainer = 2;
-    required uint64 offset = 3;
-    required uint64 length = 4;
-    // indicated at which version this block gets created.
-    optional uint64 createVersion = 5;
-}
-
-message KeyLocationList {
-    optional uint64 version = 1;
-    repeated KeyLocation keyLocations = 2;
-}
-
-message KeyInfo {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    required string keyName = 3;
-    required uint64 dataSize = 4;
-    required hadoop.hdds.ReplicationType type = 5;
-    required hadoop.hdds.ReplicationFactor factor = 6;
-    repeated KeyLocationList keyLocationList = 7;
-    required uint64 creationTime = 8;
-    required uint64 modificationTime = 9;
-    optional uint64 latestVersion = 10;
-}
-
-message LocateKeyRequest {
-    required KeyArgs keyArgs = 1;
-}
-
-message LocateKeyResponse {
-    required Status status = 1;
-    optional KeyInfo keyInfo = 2;
-    // clients' followup request may carry this ID for stateful operations (similar
-    // to a cookie).
-    optional uint32 ID = 3;
-    // TODO : allow specifiying a particular version to read.
-    optional uint64 openVersion = 4;
-}
-
-message SetBucketPropertyRequest {
-    required BucketArgs bucketArgs = 1;
-}
-
-message SetBucketPropertyResponse {
-    required Status status = 1;
-}
-
-message RenameKeyRequest{
-    required KeyArgs keyArgs = 1;
-    required string toKeyName = 2;
-}
-
-message RenameKeyResponse{
-    required Status status = 1;
-}
-
-message DeleteBucketRequest {
-    required string volumeName = 1;
-    required string bucketName = 2;
-}
-
-message DeleteBucketResponse {
-    required Status status = 1;
-}
-
-message ListKeysRequest {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    optional string startKey = 3;
-    optional string prefix = 4;
-    optional int32 count = 5;
-}
-
-message ListKeysResponse {
-    required Status status = 1;
-    repeated KeyInfo keyInfo = 2;
-}
-
-message AllocateBlockRequest {
-    required KeyArgs keyArgs = 1;
-    required uint32 clientID = 2;
-}
-
-message AllocateBlockResponse {
-    required Status status = 1;
-    required KeyLocation keyLocation = 2;
-}
-
-message CommitKeyRequest {
-    required KeyArgs keyArgs = 1;
-    required uint32 clientID = 2;
-}
-
-message CommitKeyResponse {
-    required Status status = 1;
-}
-
-message ServiceListRequest {
-}
-
-message ServiceListResponse {
-    required Status status = 1;
-    repeated ServiceInfo serviceInfo = 2;
-}
-
-message ServicePort {
-    enum Type {
-        RPC = 1;
-        HTTP = 2;
-        HTTPS = 3;
-        RATIS = 4;
-    };
-    required Type type = 1;
-    required uint32 value = 2;
-}
-
-message ServiceInfo {
-    required hadoop.hdds.NodeType nodeType = 1;
-    required string hostname = 2;
-    repeated ServicePort servicePorts = 3;
-}
-
-/**
- The KSM service that takes care of Ozone namespace.
-*/
-service KeySpaceManagerService {
-
-    /**
-        Creates a Volume.
-    */
-    rpc createVolume(CreateVolumeRequest)
-        returns(CreateVolumeResponse);
-
-    /**
-        Allows modificiation of volume properties.
-    */
-    rpc setVolumeProperty(SetVolumePropertyRequest)
-        returns (SetVolumePropertyResponse);
-
-    /**
-        Checks if the specified volume is accesible by the specified user.
-    */
-    rpc checkVolumeAccess(CheckVolumeAccessRequest)
-        returns (CheckVolumeAccessResponse);
-
-    /**
-        Gets Volume information.
-    */
-    rpc infoVolume(InfoVolumeRequest)
-        returns(InfoVolumeResponse);
-    /**
-        Deletes a volume if it is empty.
-    */
-    rpc deleteVolume(DeleteVolumeRequest)
-        returns (DeleteVolumeResponse);
-
-    /**
-        Lists Volumes
-    */
-    rpc listVolumes(ListVolumeRequest)
-        returns (ListVolumeResponse);
-
-    /**
-        Creates a Bucket.
-    */
-    rpc createBucket(CreateBucketRequest)
-        returns(CreateBucketResponse);
-
-    /**
-        Get Bucket information.
-    */
-    rpc infoBucket(InfoBucketRequest)
-        returns(InfoBucketResponse);
-
-    /**
-        Sets bucket properties.
-    */
-    rpc setBucketProperty(SetBucketPropertyRequest)
-        returns(SetBucketPropertyResponse);
-
-    /**
-        Get key.
-    */
-    rpc createKey(LocateKeyRequest)
-        returns(LocateKeyResponse);
-
-    /**
-       Look up for an existing key.
-    */
-    rpc lookupKey(LocateKeyRequest)
-        returns(LocateKeyResponse);
-
-    /**
-       Rename an existing key within a bucket.
-    */
-    rpc renameKey(RenameKeyRequest)
-        returns(RenameKeyResponse);
-
-    /**
-       Delete an existing key.
-    */
-    rpc deleteKey(LocateKeyRequest)
-        returns(LocateKeyResponse);
-
-    /**
-       Deletes a bucket from volume if it is empty.
-    */
-    rpc deleteBucket(DeleteBucketRequest)
-        returns (DeleteBucketResponse);
-
-    /**
-       List Buckets.
-    */
-    rpc listBuckets(ListBucketsRequest)
-    returns(ListBucketsResponse);
-
-    /**
-       List Keys.
-    */
-    rpc listKeys(ListKeysRequest)
-    returns(ListKeysResponse);
-
-    /**
-      Commit a key.
-    */
-    rpc commitKey(CommitKeyRequest)
-    returns(CommitKeyResponse);
-
-    /**
-      Allocate a new block for a key.
-    */
-    rpc allocateBlock(AllocateBlockRequest)
-    returns(AllocateBlockResponse);
-
-    /**
-      Returns list of Ozone services with its configuration details.
-    */
-    rpc getServiceList(ServiceListRequest)
-    returns(ServiceListResponse);
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
new file mode 100644
index 0000000..36b1c83
--- /dev/null
+++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
@@ -0,0 +1,480 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and unstable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *unstable* .proto interface.
+ */
+
+option java_package = "org.apache.hadoop.ozone.protocol.proto";
+option java_outer_classname = "OzoneManagerProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package hadoop.ozone;
+
+/**
+This file contains the protocol used to communicate with the
+Ozone Manager. The Ozone Manager manages the namespace for Ozone.
+It is similar to the Namenode in HDFS.
+*/
+
+import "hdfs.proto";
+import "hdds.proto";
+
+enum Status {
+    OK = 1;
+    VOLUME_NOT_UNIQUE = 2;
+    VOLUME_NOT_FOUND = 3;
+    VOLUME_NOT_EMPTY = 4;
+    VOLUME_ALREADY_EXISTS = 5;
+    USER_NOT_FOUND = 6;
+    USER_TOO_MANY_VOLUMES = 7;
+    BUCKET_NOT_FOUND = 8;
+    BUCKET_NOT_EMPTY = 9;
+    BUCKET_ALREADY_EXISTS = 10;
+    KEY_ALREADY_EXISTS = 11;
+    KEY_NOT_FOUND = 12;
+    INVALID_KEY_NAME = 13;
+    ACCESS_DENIED = 14;
+    INTERNAL_ERROR = 15;
+    KEY_ALLOCATION_ERROR = 16;
+    KEY_DELETION_ERROR = 17;
+    KEY_RENAME_ERROR = 18;
+    METADATA_ERROR = 19;
+    OM_NOT_INITIALIZED = 20;
+    SCM_VERSION_MISMATCH_ERROR = 21;
+}
+
+
+message VolumeInfo {
+    required string adminName = 1;
+    required string ownerName = 2;
+    required string volume = 3;
+    optional uint64 quotaInBytes = 4;
+    repeated hadoop.hdds.KeyValue metadata = 5;
+    repeated OzoneAclInfo volumeAcls = 6;
+    required uint64 creationTime = 7;
+}
+
+/**
+    Creates a volume
+*/
+message CreateVolumeRequest {
+    required VolumeInfo volumeInfo = 1;
+}
+
+message CreateVolumeResponse {
+
+    required Status status = 1;
+}
+
+message VolumeList {
+    repeated string volumeNames = 1;
+}
+
+/**
+    Changes the Volume Properties -- like ownership and quota for a volume.
+*/
+message SetVolumePropertyRequest {
+    required string volumeName = 1;
+    optional string ownerName = 2;
+    optional uint64 quotaInBytes = 3;
+}
+
+message SetVolumePropertyResponse {
+    required Status status = 1;
+}
+
+/**
+ * Checks if the user has specified permissions for the volume
+ */
+message CheckVolumeAccessRequest {
+    required string volumeName = 1;
+    required OzoneAclInfo userAcl = 2;
+}
+
+message CheckVolumeAccessResponse {
+
+    required Status status = 1;
+}
+
+
+/**
+    Returns information about a volume.
+*/
+
+message InfoVolumeRequest {
+    required string volumeName = 1;
+}
+
+message InfoVolumeResponse {
+    required Status status = 1;
+    optional VolumeInfo volumeInfo = 2;
+
+}
+
+/**
+    Deletes an existing volume.
+*/
+message DeleteVolumeRequest {
+    required string volumeName = 1;
+}
+
+message DeleteVolumeResponse {
+    required Status status = 1;
+}
+
+
+/**
+    List Volumes -- List all volumes in the cluster or by user.
+*/
+
+message ListVolumeRequest {
+    enum Scope {
+        USER_VOLUMES = 1;   // User volumes -- called by user
+        VOLUMES_BY_USER = 2; // User volumes - called by Admin
+        VOLUMES_BY_CLUSTER = 3; // All volumes in the cluster
+    }
+    required Scope scope = 1;
+    optional string userName = 2;
+    optional string prefix = 3;
+    optional string prevKey = 4;
+    optional uint32 maxKeys = 5;
+}
+
+message ListVolumeResponse {
+    required Status status = 1;
+    repeated VolumeInfo volumeInfo = 2;
+}
+
+message BucketInfo {
+    required string volumeName = 1;
+    required string bucketName = 2;
+    repeated OzoneAclInfo acls = 3;
+    required bool isVersionEnabled = 4 [default = false];
+    required hadoop.hdfs.StorageTypeProto storageType = 5 [default = DISK];
+    required uint64 creationTime = 6;
+}
+
+message BucketArgs {
+    required string volumeName = 1;
+    required string bucketName = 2;
+    repeated OzoneAclInfo addAcls = 3;
+    repeated OzoneAclInfo removeAcls = 4;
+    optional bool isVersionEnabled = 5;
+    optional hadoop.hdfs.StorageTypeProto storageType = 6;
+}
+
+message OzoneAclInfo {
+    enum OzoneAclType {
+        USER = 1;
+        GROUP = 2;
+        WORLD = 3;
+    }
+    enum OzoneAclRights {
+        READ = 1;
+        WRITE = 2;
+        READ_WRITE = 3;
+    }
+    required OzoneAclType type = 1;
+    required string name = 2;
+    required OzoneAclRights rights = 3;
+}
+
+message CreateBucketRequest {
+    required BucketInfo bucketInfo = 1;
+}
+
+message CreateBucketResponse {
+    required Status status = 1;
+}
+
+message InfoBucketRequest {
+    required string volumeName = 1;
+    required string bucketName = 2;
+}
+
+message InfoBucketResponse {
+    required Status status = 1;
+    optional BucketInfo bucketInfo = 2;
+}
+
+message ListBucketsRequest {
+    required string volumeName = 1;
+    optional string startKey = 2;
+    optional string prefix = 3;
+    optional int32 count = 4;
+}
+
+message ListBucketsResponse {
+    required Status status = 1;
+    repeated BucketInfo bucketInfo = 2;
+}
+
+message KeyArgs {
+    required string volumeName = 1;
+    required string bucketName = 2;
+    required string keyName = 3;
+    optional uint64 dataSize = 4;
+    optional hadoop.hdds.ReplicationType type = 5;
+    optional hadoop.hdds.ReplicationFactor factor = 6;
+}
+
+message KeyLocation {
+    required hadoop.hdds.BlockID blockID = 1;
+    required bool shouldCreateContainer = 2;
+    required uint64 offset = 3;
+    required uint64 length = 4;
+    // indicates at which version this block gets created.
+    optional uint64 createVersion = 5;
+}
+
+message KeyLocationList {
+    optional uint64 version = 1;
+    repeated KeyLocation keyLocations = 2;
+}
+
+message KeyInfo {
+    required string volumeName = 1;
+    required string bucketName = 2;
+    required string keyName = 3;
+    required uint64 dataSize = 4;
+    required hadoop.hdds.ReplicationType type = 5;
+    required hadoop.hdds.ReplicationFactor factor = 6;
+    repeated KeyLocationList keyLocationList = 7;
+    required uint64 creationTime = 8;
+    required uint64 modificationTime = 9;
+    optional uint64 latestVersion = 10;
+}
+
+message LocateKeyRequest {
+    required KeyArgs keyArgs = 1;
+}
+
+message LocateKeyResponse {
+    required Status status = 1;
+    optional KeyInfo keyInfo = 2;
+    // clients' followup request may carry this ID for stateful operations (similar
+    // to a cookie).
+    optional uint32 ID = 3;
+    // TODO : allow specifying a particular version to read.
+    optional uint64 openVersion = 4;
+}
+
+message SetBucketPropertyRequest {
+    required BucketArgs bucketArgs = 1;
+}
+
+message SetBucketPropertyResponse {
+    required Status status = 1;
+}
+
+message RenameKeyRequest{
+    required KeyArgs keyArgs = 1;
+    required string toKeyName = 2;
+}
+
+message RenameKeyResponse{
+    required Status status = 1;
+}
+
+message DeleteBucketRequest {
+    required string volumeName = 1;
+    required string bucketName = 2;
+}
+
+message DeleteBucketResponse {
+    required Status status = 1;
+}
+
+message ListKeysRequest {
+    required string volumeName = 1;
+    required string bucketName = 2;
+    optional string startKey = 3;
+    optional string prefix = 4;
+    optional int32 count = 5;
+}
+
+message ListKeysResponse {
+    required Status status = 1;
+    repeated KeyInfo keyInfo = 2;
+}
+
+message AllocateBlockRequest {
+    required KeyArgs keyArgs = 1;
+    required uint32 clientID = 2;
+}
+
+message AllocateBlockResponse {
+    required Status status = 1;
+    required KeyLocation keyLocation = 2;
+}
+
+message CommitKeyRequest {
+    required KeyArgs keyArgs = 1;
+    required uint32 clientID = 2;
+}
+
+message CommitKeyResponse {
+    required Status status = 1;
+}
+
+message ServiceListRequest {
+}
+
+message ServiceListResponse {
+    required Status status = 1;
+    repeated ServiceInfo serviceInfo = 2;
+}
+
+message ServicePort {
+    enum Type {
+        RPC = 1;
+        HTTP = 2;
+        HTTPS = 3;
+        RATIS = 4;
+    };
+    required Type type = 1;
+    required uint32 value = 2;
+}
+
+message ServiceInfo {
+    required hadoop.hdds.NodeType nodeType = 1;
+    required string hostname = 2;
+    repeated ServicePort servicePorts = 3;
+}
+
+/**
+ The OM service that takes care of Ozone namespace.
+*/
+service OzoneManagerService {
+
+    /**
+        Creates a Volume.
+    */
+    rpc createVolume(CreateVolumeRequest)
+        returns(CreateVolumeResponse);
+
+    /**
+        Allows modification of volume properties.
+    */
+    rpc setVolumeProperty(SetVolumePropertyRequest)
+        returns (SetVolumePropertyResponse);
+
+    /**
+        Checks if the specified volume is accessible by the specified user.
+    */
+    rpc checkVolumeAccess(CheckVolumeAccessRequest)
+        returns (CheckVolumeAccessResponse);
+
+    /**
+        Gets Volume information.
+    */
+    rpc infoVolume(InfoVolumeRequest)
+        returns(InfoVolumeResponse);
+    /**
+        Deletes a volume if it is empty.
+    */
+    rpc deleteVolume(DeleteVolumeRequest)
+        returns (DeleteVolumeResponse);
+
+    /**
+        Lists Volumes
+    */
+    rpc listVolumes(ListVolumeRequest)
+        returns (ListVolumeResponse);
+
+    /**
+        Creates a Bucket.
+    */
+    rpc createBucket(CreateBucketRequest)
+        returns(CreateBucketResponse);
+
+    /**
+        Get Bucket information.
+    */
+    rpc infoBucket(InfoBucketRequest)
+        returns(InfoBucketResponse);
+
+    /**
+        Sets bucket properties.
+    */
+    rpc setBucketProperty(SetBucketPropertyRequest)
+        returns(SetBucketPropertyResponse);
+
+    /**
+        Get key.
+    */
+    rpc createKey(LocateKeyRequest)
+        returns(LocateKeyResponse);
+
+    /**
+       Looks up an existing key.
+    */
+    rpc lookupKey(LocateKeyRequest)
+        returns(LocateKeyResponse);
+
+    /**
+       Rename an existing key within a bucket.
+    */
+    rpc renameKey(RenameKeyRequest)
+        returns(RenameKeyResponse);
+
+    /**
+       Delete an existing key.
+    */
+    rpc deleteKey(LocateKeyRequest)
+        returns(LocateKeyResponse);
+
+    /**
+       Deletes a bucket from volume if it is empty.
+    */
+    rpc deleteBucket(DeleteBucketRequest)
+        returns (DeleteBucketResponse);
+
+    /**
+       List Buckets.
+    */
+    rpc listBuckets(ListBucketsRequest)
+    returns(ListBucketsResponse);
+
+    /**
+       List Keys.
+    */
+    rpc listKeys(ListKeysRequest)
+    returns(ListKeysResponse);
+
+    /**
+      Commit a key.
+    */
+    rpc commitKey(CommitKeyRequest)
+    returns(CommitKeyResponse);
+
+    /**
+      Allocate a new block for a key.
+    */
+    rpc allocateBlock(AllocateBlockRequest)
+    returns(AllocateBlockResponse);
+
+    /**
+      Returns list of Ozone services with its configuration details.
+    */
+    rpc getServiceList(ServiceListRequest)
+    returns(ServiceListResponse);
+}
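
Assuming the standard protobuf-java code generation for the file above (outer class OzoneManagerProtocolProtos), a client would build requests with the generated builders; the names below are illustrative:

```java
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateVolumeRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;

public final class CreateVolumeRequestSketch {

  public static void main(String[] args) {
    // Every "required" field of VolumeInfo must be set before build().
    VolumeInfo volume = VolumeInfo.newBuilder()
        .setAdminName("hdfs")
        .setOwnerName("alice")
        .setVolume("volume-one")
        .setCreationTime(System.currentTimeMillis())
        .build();

    CreateVolumeRequest request = CreateVolumeRequest.newBuilder()
        .setVolumeInfo(volume)
        .build();

    System.out.println(request);
  }
}
```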

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/docs/content/GettingStarted.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/GettingStarted.md b/hadoop-ozone/docs/content/GettingStarted.md
index 531d192..117a307 100644
--- a/hadoop-ozone/docs/content/GettingStarted.md
+++ b/hadoop-ozone/docs/content/GettingStarted.md
@@ -194,12 +194,12 @@ This path will be created by datanodes if it doesn't exist already. Here is an
     </property>
     ```
 
-1. **ozone.ksm.address** OM server address. This is used by OzoneClient and
+1. **ozone.om.address** OM server address. This is used by OzoneClient and
 Ozone File System.
     ```
     <property>
-       <name>ozone.ksm.address</name>
-       <value>ksm.hadoop.apache.org</value>
+       <name>ozone.om.address</name>
+       <value>om.hadoop.apache.org</value>
     </property>
     ```
 
@@ -210,10 +210,10 @@ Ozone File System.
 | ozone.enabled                  | True                         | This enables SCM and  containers in HDFS cluster.                |
 | ozone.metadata.dirs            | file path                    | The metadata will be stored here.                                |
 | ozone.scm.names                | SCM server name              | Hostname:port or IP:port address of SCM.                         |
-| ozone.scm.block.client.address | SCM server name and port     | Used by services like OM                                        |
+| ozone.scm.block.client.address | SCM server name and port     | Used by services like OM                                         |
 | ozone.scm.client.address       | SCM server name and port     | Used by client side                                              |
 | ozone.scm.datanode.address     | SCM server name and port     | Used by datanode to talk to SCM                                  |
-| ozone.ksm.address              | OM server name              | Used by Ozone handler and Ozone file system.                     |
+| ozone.om.address               | OM server name               | Used by Ozone handler and Ozone file system.                     |
 
 
 #### Sample ozone-site.xml
@@ -253,7 +253,7 @@ Ozone File System.
      </property>
 
      <property>
-       <name>ozone.ksm.address</name>
+       <name>ozone.om.address</name>
        <value>127.0.0.1:9874</value>
      </property>
 </configuration>
@@ -286,12 +286,12 @@ ozone --daemon start scm
 
 Once SCM gets started, OM must be initialized.
 ```
-ozone ksm -createObjectStore
+ozone om -createObjectStore
 ```
 
 Start OM.
 ```
-ozone --daemon start ksm
+ozone --daemon start om
 ```
 
 If you would like to start HDFS and Ozone together, you can do that by running
@@ -349,7 +349,7 @@ log4j.additivity.org.apache.hadoop.ozone=false
 ```
 
 On the SCM/OM side, you will be able to see
-1. `hadoop-hdfs-ksm-hostname.log`
+1. `hadoop-hdfs-om-hostname.log`
 1. `hadoop-hdfs-scm-hostname.log`
 
 ## Reporting Bugs

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/docs/content/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/Metrics.md b/hadoop-ozone/docs/content/Metrics.md
index dc58460..64a481f 100644
--- a/hadoop-ozone/docs/content/Metrics.md
+++ b/hadoop-ozone/docs/content/Metrics.md
@@ -131,10 +131,10 @@ Following are the counters for containers:
 
 ### Key Space Metrics
 
-The metrics for various key space manager operations in HDFS Ozone.
+The metrics for various Ozone Manager operations in HDFS Ozone.
 
-key space manager (KSM) is a service that similar to the Namenode in HDFS.
-In the current design of KSM, it maintains metadata of all volumes, buckets and keys.
+The Ozone Manager (OM) is a service similar to the Namenode in HDFS.
+In the current design of OM, it maintains the metadata of all volumes, buckets and keys.
 These metrics are only available when ozone is enabled.
 
 Following is the set of counters maintained for each key space operation.
@@ -142,12 +142,12 @@ Following is the set of counters maintained for each key space operation.
 *Total number of operation* - We maintain an array which counts how
 many times a specific operation has been performed.
 Eg.`NumVolumeCreate` tells us how many times create volume has been
-invoked in KSM.
+invoked in OM.
 
 *Total number of failed operation* - This type operation is opposite to the above
 operation.
 Eg.`NumVolumeCreateFails` tells us how many times create volume has been invoked
-failed in KSM.
+failed in OM.
 
 Following are the counters for each of key space operations.
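
As a rough illustration of how these counters can be inspected, the sketch below scans the local JMX server for any Hadoop MBean exposing the NumVolumeCreate attribute named above; the "Hadoop:*" domain filter is an assumption, and the snippet only produces output when run inside (or attached to) an OM process:

```java
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public final class OmMetricsPeek {

  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();

    // "Hadoop:*" is an assumption about the metrics domain; the attribute
    // names come from the counters described above.
    for (ObjectName name : mbs.queryNames(new ObjectName("Hadoop:*"), null)) {
      try {
        Object created = mbs.getAttribute(name, "NumVolumeCreate");
        Object failed = mbs.getAttribute(name, "NumVolumeCreateFails");
        System.out.println(name + " NumVolumeCreate=" + created
            + " NumVolumeCreateFails=" + failed);
      } catch (Exception ignored) {
        // Most MBeans do not expose these attributes; skip them.
      }
    }
  }
}
```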
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/docs/content/_index.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/_index.md b/hadoop-ozone/docs/content/_index.md
index ab7eabe..383b2e0 100644
--- a/hadoop-ozone/docs/content/_index.md
+++ b/hadoop-ozone/docs/content/_index.md
@@ -56,14 +56,14 @@ This is like DFSClient in HDFS. This acts as the standard client to talk to
 Ozone. All other components that we have discussed so far rely on Ozone client
 (TODO: Add Ozone client documentation).

 
-## Key Space Manager

+## Ozone Manager
 
-Key Space Manager(KSM) takes care of the Ozone's namespace.
-All ozone entities like volumes, buckets and keys are managed by KSM
-(TODO: Add KSM documentation). In Short, KSM is the metadata manager for Ozone.
-KSM talks to blockManager(SCM) to get blocks and passes it on to the Ozone
+Ozone Manager (OM) takes care of Ozone's namespace.
+All Ozone entities like volumes, buckets and keys are managed by OM
+(TODO: Add OM documentation). In short, OM is the metadata manager for Ozone.
+OM talks to the block manager (SCM) to get blocks and passes them on to the Ozone
 client.  Ozone client writes data to these blocks.
-KSM will eventually be replicated via Apache Ratis for High Availability.

+OM will eventually be replicated via Apache Ratis for High Availability.

 
 ## Storage Container Manager
 Storage Container Manager (SCM) is the block and cluster manager for Ozone.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/docs/static/OzoneOverview.svg
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/static/OzoneOverview.svg b/hadoop-ozone/docs/static/OzoneOverview.svg
index 2e14d3f..0120a5c 100644
--- a/hadoop-ozone/docs/static/OzoneOverview.svg
+++ b/hadoop-ozone/docs/static/OzoneOverview.svg
@@ -166,7 +166,7 @@
             <path d="M307.5,148.5 L433.5,148.5" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
             <path id="Line-decoration-1" d="M433.5,148.5 L422.7,145.5 L422.7,151.5 L433.5,148.5 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
             <path d="M4,232 L699,232" id="Line" stroke="#000000" stroke-width="2" stroke-linecap="square" stroke-dasharray="5,2,5"></path>
-            <g id="KSM" transform="translate(432.000000, 132.000000)">
+            <g id="OM" transform="translate(432.000000, 132.000000)">
                 <g id="Rectangle-3">
                     <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-19"></use>
                     <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="225" height="35" rx="8"></rect>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 091d771..b568672 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.ksm.KeySpaceManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -82,12 +82,12 @@ public interface MiniOzoneCluster {
   StorageContainerManager getStorageContainerManager();
 
   /**
-   * Returns {@link KeySpaceManager} associated with this
+   * Returns {@link OzoneManager} associated with this
    * {@link MiniOzoneCluster} instance.
    *
-   * @return {@link KeySpaceManager} instance
+   * @return {@link OzoneManager} instance
    */
-  KeySpaceManager getKeySpaceManager();
+  OzoneManager getOzoneManager();
 
   /**
    * Returns the list of {@link HddsDatanodeService} which are part of this
@@ -141,11 +141,11 @@ public interface MiniOzoneCluster {
   void restartStorageContainerManager() throws IOException;
 
   /**
-   * Restarts KeySpaceManager instance.
+   * Restarts OzoneManager instance.
    *
    * @throws IOException
    */
-  void restartKeySpaceManager() throws IOException;
+  void restartOzoneManager() throws IOException;
 
   /**
    * Restart a particular HddsDatanode.
@@ -184,13 +184,13 @@ public interface MiniOzoneCluster {
     protected Optional<Integer> hbInterval = Optional.empty();
     protected Optional<Integer> hbProcessorInterval = Optional.empty();
     protected Optional<String> scmId = Optional.empty();
-    protected Optional<String> ksmId = Optional.empty();
+    protected Optional<String> omId = Optional.empty();
 
     protected Boolean ozoneEnabled = true;
     protected Boolean randomContainerPort = true;
 
     // Use relative smaller number of handlers for testing
-    protected int numOfKsmHandlers = 20;
+    protected int numOfOmHandlers = 20;
     protected int numOfScmHandlers = 20;
     protected int numOfDatanodes = 1;
 
@@ -226,14 +226,14 @@ public interface MiniOzoneCluster {
     }
 
     /**
-     * Sets the KSM id.
+     * Sets the OM id.
      *
-     * @param id KSM Id
+     * @param id OM Id
      *
      * @return MiniOzoneCluster.Builder
      */
-    public Builder setKsmId(String id) {
-      ksmId = Optional.of(id);
+    public Builder setOmId(String id) {
+      omId = Optional.of(id);
       return this;
     }
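
To show how the renamed pieces fit together, here is a hedged usage sketch; newBuilder(conf), build() and shutdown() are not visible in this hunk and are assumed, while setOmId(), getOzoneManager() and restartOzoneManager() are the methods introduced by this change:

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.om.OzoneManager;

public final class MiniOzoneClusterOmSketch {

  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();

    // Builder entry point and build()/shutdown() are assumed from context.
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setOmId("test-om")
        .build();
    try {
      OzoneManager om = cluster.getOzoneManager();
      System.out.println("OM running: " + (om != null));

      // Exercise the renamed restart hook.
      cluster.restartOzoneManager();
    } finally {
      cluster.shutdown();
    }
  }
}
```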
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index f0bfef1..b3137bf 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -34,10 +34,10 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
-import org.apache.hadoop.ozone.ksm.KeySpaceManager;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
-import org.apache.hadoop.ozone.ksm.KSMStorage;
+import org.apache.hadoop.ozone.om.OMStorage;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
@@ -73,7 +73,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys
 
 /**
  * MiniOzoneCluster creates a complete in-process Ozone cluster suitable for
- * running tests.  The cluster consists of a KeySpaceManager,
+ * running tests.  The cluster consists of an OzoneManager,
  * StorageContainerManager and multiple DataNodes.
  */
 @InterfaceAudience.Private
@@ -84,7 +84,7 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
 
   private final OzoneConfiguration conf;
   private final StorageContainerManager scm;
-  private final KeySpaceManager ksm;
+  private final OzoneManager ozoneManager;
   private final List<HddsDatanodeService> hddsDatanodes;
 
   /**
@@ -93,11 +93,11 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
    * @throws IOException if there is an I/O error
    */
   private MiniOzoneClusterImpl(OzoneConfiguration conf,
-                               KeySpaceManager ksm,
+                               OzoneManager ozoneManager,
                                StorageContainerManager scm,
                                List<HddsDatanodeService> hddsDatanodes) {
     this.conf = conf;
-    this.ksm = ksm;
+    this.ozoneManager = ozoneManager;
     this.scm = scm;
     this.hddsDatanodes = hddsDatanodes;
   }
@@ -147,8 +147,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
   }
 
   @Override
-  public KeySpaceManager getKeySpaceManager() {
-    return this.ksm;
+  public OzoneManager getOzoneManager() {
+    return this.ozoneManager;
   }
 
   @Override
@@ -209,9 +209,9 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
   }
 
   @Override
-  public void restartKeySpaceManager() throws IOException {
-    ksm.stop();
-    ksm.start();
+  public void restartOzoneManager() throws IOException {
+    ozoneManager.stop();
+    ozoneManager.start();
   }
 
   @Override
@@ -247,10 +247,10 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
               scm.getClientProtocolServer().getScmInfo().getClusterId()));
       FileUtils.deleteDirectory(baseDir);
 
-      if (ksm != null) {
-        LOG.info("Shutting down the keySpaceManager");
-        ksm.stop();
-        ksm.join();
+      if (ozoneManager != null) {
+        LOG.info("Shutting down the OzoneManager");
+        ozoneManager.stop();
+        ozoneManager.join();
       }
 
       if (scm != null) {
@@ -291,11 +291,11 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
       initializeConfiguration();
       StorageContainerManager scm = createSCM();
       scm.start();
-      KeySpaceManager ksm = createKSM();
-      ksm.start();
+      OzoneManager om = createOM();
+      om.start();
       List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(scm);
       hddsDatanodes.forEach((datanode) -> datanode.start(null));
-      return new MiniOzoneClusterImpl(conf, ksm, scm, hddsDatanodes);
+      return new MiniOzoneClusterImpl(conf, om, scm, hddsDatanodes);
     }
 
     /**
@@ -331,20 +331,20 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
     }
 
     /**
-     * Creates a new KeySpaceManager instance.
+     * Creates a new OzoneManager instance.
      *
-     * @return {@link KeySpaceManager}
+     * @return {@link OzoneManager}
      *
      * @throws IOException
      */
-    private KeySpaceManager createKSM() throws IOException {
-      configureKSM();
-      KSMStorage ksmStore = new KSMStorage(conf);
-      ksmStore.setClusterId(clusterId);
-      ksmStore.setScmId(scmId.get());
-      ksmStore.setKsmId(ksmId.orElse(UUID.randomUUID().toString()));
-      ksmStore.initialize();
-      return KeySpaceManager.createKSM(null, conf);
+    private OzoneManager createOM() throws IOException {
+      configureOM();
+      OMStorage omStore = new OMStorage(conf);
+      omStore.setClusterId(clusterId);
+      omStore.setScmId(scmId.get());
+      omStore.setOmId(omId.orElse(UUID.randomUUID().toString()));
+      omStore.initialize();
+      return OzoneManager.createOm(null, conf);
     }
 
     /**
@@ -415,10 +415,10 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
     }
 
 
-    private void configureKSM() {
-      conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, "127.0.0.1:0");
-      conf.set(KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-      conf.setInt(KSMConfigKeys.OZONE_KSM_HANDLER_COUNT_KEY, numOfKsmHandlers);
+    private void configureOM() {
+      conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "127.0.0.1:0");
+      conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+      conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numOfOmHandlers);
     }
 
     private void configureHddsDatanodes() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 4898a1b..717bb68 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone;
 
 import org.apache.hadoop.conf.TestConfigurationFieldsBase;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 
 /**
@@ -31,7 +31,7 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase {
     xmlFilename = new String("ozone-default.xml");
     configurationClasses =
         new Class[] {OzoneConfigKeys.class, ScmConfigKeys.class,
-            KSMConfigKeys.class};
+            OMConfigKeys.class};
     errorIfMissingConfigProps = true;
     errorIfMissingXmlProps = true;
     xmlPropsToSkipCompare.add("hadoop.tags.custom");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index dd1a8de..cc367b3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -60,8 +61,7 @@ import java.util.concurrent.TimeUnit;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 
 import org.junit.rules.Timeout;
@@ -211,7 +211,7 @@ public class TestStorageContainerManager {
       // Create {numKeys} random names keys.
       TestStorageContainerManagerHelper helper =
           new TestStorageContainerManagerHelper(cluster, conf);
-      Map<String, KsmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
+      Map<String, OmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
 
       Map<Long, List<Long>> containerBlocks = createDeleteTXLog(delLog,
           keyLocations, helper);
@@ -293,7 +293,7 @@ public class TestStorageContainerManager {
     // Create {numKeys} random names keys.
     TestStorageContainerManagerHelper helper =
         new TestStorageContainerManagerHelper(cluster, conf);
-    Map<String, KsmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
+    Map<String, OmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
 
     createDeleteTXLog(delLog, keyLocations, helper);
     // Verify a few TX gets created in the TX log.
@@ -320,13 +320,13 @@ public class TestStorageContainerManager {
   }
 
   private Map<Long, List<Long>> createDeleteTXLog(DeletedBlockLog delLog,
-      Map<String, KsmKeyInfo> keyLocations,
+      Map<String, OmKeyInfo> keyLocations,
       TestStorageContainerManagerHelper helper) throws IOException {
     // These keys will be written into a bunch of containers,
     // gets a set of container names, verify container containerBlocks
     // on datanodes.
     Set<Long> containerNames = new HashSet<>();
-    for (Map.Entry<String, KsmKeyInfo> entry : keyLocations.entrySet()) {
+    for (Map.Entry<String, OmKeyInfo> entry : keyLocations.entrySet()) {
       entry.getValue().getLatestVersionLocations().getLocationList()
           .forEach(loc -> containerNames.add(loc.getContainerID()));
     }
@@ -334,7 +334,7 @@ public class TestStorageContainerManager {
     // Total number of containerBlocks of these containers should be equal to
     // total number of containerBlocks via creation call.
     int totalCreatedBlocks = 0;
-    for (KsmKeyInfo info : keyLocations.values()) {
+    for (OmKeyInfo info : keyLocations.values()) {
       totalCreatedBlocks += info.getKeyLocationVersions().size();
     }
     Assert.assertTrue(totalCreatedBlocks > 0);
@@ -343,8 +343,8 @@ public class TestStorageContainerManager {
 
     // Create a deletion TX for each key.
     Map<Long, List<Long>> containerBlocks = Maps.newHashMap();
-    for (KsmKeyInfo info : keyLocations.values()) {
-      List<KsmKeyLocationInfo> list =
+    for (OmKeyInfo info : keyLocations.values()) {
+      List<OmKeyLocationInfo> list =
           info.getLatestVersionLocations().getLocationList();
       list.forEach(location -> {
         if (containerBlocks.containsKey(location.getContainerID())) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index 4c2a904..a30c6f4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.web.handlers.BucketArgs;
 import org.apache.hadoop.ozone.web.handlers.KeyArgs;
 import org.apache.hadoop.ozone.web.handlers.UserArgs;
@@ -67,9 +67,9 @@ public class TestStorageContainerManagerHelper {
     storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
   }
 
-  public Map<String, KsmKeyInfo> createKeys(int numOfKeys, int keySize)
+  public Map<String, OmKeyInfo> createKeys(int numOfKeys, int keySize)
       throws Exception {
-    Map<String, KsmKeyInfo> keyLocationMap = Maps.newHashMap();
+    Map<String, OmKeyInfo> keyLocationMap = Maps.newHashMap();
     String volume = "volume" + RandomStringUtils.randomNumeric(5);
     String bucket = "bucket" + RandomStringUtils.randomNumeric(5);
     String userName = "user" + RandomStringUtils.randomNumeric(5);
@@ -104,12 +104,12 @@ public class TestStorageContainerManagerHelper {
     }
 
     for (String key : keyNames) {
-      KsmKeyArgs arg = new KsmKeyArgs.Builder()
+      OmKeyArgs arg = new OmKeyArgs.Builder()
           .setVolumeName(volume)
           .setBucketName(bucket)
           .setKeyName(key)
           .build();
-      KsmKeyInfo location = cluster.getKeySpaceManager()
+      OmKeyInfo location = cluster.getOzoneManager()
           .lookupKey(arg);
       keyLocationMap.put(key, location);
     }
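
Summarizing the renamed lookup path in one place, a hedged sketch (hypothetical code, not part of the patch; volume, bucket and key are placeholder strings, and the types come from org.apache.hadoop.ozone.om.helpers as imported above):

    // Hypothetical sketch of the post-rename key lookup.
    OmKeyArgs arg = new OmKeyArgs.Builder()
        .setVolumeName(volume)                      // placeholder volume name
        .setBucketName(bucket)                      // placeholder bucket name
        .setKeyName(key)                            // placeholder key name
        .build();
    OmKeyInfo location = cluster.getOzoneManager()  // formerly getKeySpaceManager()
        .lookupKey(arg);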

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
index 9918d63..0dc0399 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
@@ -77,10 +77,10 @@ public class TestOzoneRestClient {
         OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
     cluster.waitForClusterToBeReady();
-    InetSocketAddress ksmHttpAddress = cluster.getKeySpaceManager()
+    InetSocketAddress omHttpAddress = cluster.getOzoneManager()
         .getHttpServer().getHttpAddress();
-    ozClient = OzoneClientFactory.getRestClient(ksmHttpAddress.getHostName(),
-        ksmHttpAddress.getPort(), conf);
+    ozClient = OzoneClientFactory.getRestClient(omHttpAddress.getHostName(),
+        omHttpAddress.getPort(), conf);
     store = ozClient.getObjectStore();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
index 214382e..2fbab36 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
@@ -39,10 +39,10 @@ import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.ksm.KeySpaceManager;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -73,7 +73,7 @@ public class TestOzoneRpcClient {
   private static MiniOzoneCluster cluster = null;
   private static OzoneClient ozClient = null;
   private static ObjectStore store = null;
-  private static KeySpaceManager keySpaceManager;
+  private static OzoneManager ozoneManager;
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
 
@@ -97,7 +97,7 @@ public class TestOzoneRpcClient {
     store = ozClient.getObjectStore();
     storageContainerLocationClient =
         cluster.getStorageContainerLocationClient();
-    keySpaceManager = cluster.getKeySpaceManager();
+    ozoneManager = cluster.getOzoneManager();
   }
 
   @Test
@@ -376,7 +376,7 @@ public class TestOzoneRpcClient {
   private boolean verifyRatisReplication(String volumeName, String bucketName,
       String keyName, ReplicationType type, ReplicationFactor factor)
       throws IOException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
@@ -385,8 +385,8 @@ public class TestOzoneRpcClient {
         HddsProtos.ReplicationType.valueOf(type.toString());
     HddsProtos.ReplicationFactor replicationFactor =
         HddsProtos.ReplicationFactor.valueOf(factor.getValue());
-    KsmKeyInfo keyInfo = keySpaceManager.lookupKey(keyArgs);
-    for (KsmKeyLocationInfo info:
+    OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs);
+    for (OmKeyLocationInfo info:
         keyInfo.getLatestVersionLocations().getLocationList()) {
       ContainerInfo container =
           storageContainerLocationClient.getContainer(info.getContainerID());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index 43e3f50..62059ec 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -37,10 +37,10 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
-import org.apache.hadoop.ozone.ksm.KeySpaceManager;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.ozShell.TestOzoneShell;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.utils.MetadataStore;
@@ -61,7 +61,7 @@ public class TestBlockDeletion {
   private static ObjectStore store;
   private static ContainerManagerImpl dnContainerManager = null;
   private static StorageContainerManager scm = null;
-  private static KeySpaceManager ksm = null;
+  private static OzoneManager om = null;
   private static Set<Long> containerIdsWithDeletedBlocks;
 
   @BeforeClass
@@ -88,7 +88,7 @@ public class TestBlockDeletion {
     dnContainerManager =
         (ContainerManagerImpl) cluster.getHddsDatanodes().get(0)
             .getDatanodeStateMachine().getContainer().getContainerManager();
-    ksm = cluster.getKeySpaceManager();
+    om = cluster.getOzoneManager();
     scm = cluster.getStorageContainerManager();
     containerIdsWithDeletedBlocks = new HashSet<>();
   }
@@ -112,23 +112,23 @@ public class TestBlockDeletion {
     out.write(value.getBytes());
     out.close();
 
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder().setVolumeName(volumeName)
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
         .setBucketName(bucketName).setKeyName(keyName).setDataSize(0)
         .setType(HddsProtos.ReplicationType.STAND_ALONE)
         .setFactor(HddsProtos.ReplicationFactor.ONE).build();
-    List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoGroupList =
-        ksm.lookupKey(keyArgs).getKeyLocationVersions();
+    List<OmKeyLocationInfoGroup> omKeyLocationInfoGroupList =
+        om.lookupKey(keyArgs).getKeyLocationVersions();
 
     // verify key blocks were created in DN.
-    Assert.assertTrue(verifyBlocksCreated(ksmKeyLocationInfoGroupList));
+    Assert.assertTrue(verifyBlocksCreated(omKeyLocationInfoGroupList));
     // No containers with deleted blocks
     Assert.assertTrue(containerIdsWithDeletedBlocks.isEmpty());
     // Delete transactionIds for the containers should be 0
     matchContainerTransactionIds();
-    ksm.deleteKey(keyArgs);
+    om.deleteKey(keyArgs);
     Thread.sleep(5000);
     // The blocks should be deleted in the DN.
-    Assert.assertTrue(verifyBlocksDeleted(ksmKeyLocationInfoGroupList));
+    Assert.assertTrue(verifyBlocksDeleted(omKeyLocationInfoGroupList));
 
     // Few containers with deleted blocks
     Assert.assertTrue(!containerIdsWithDeletedBlocks.isEmpty());
@@ -155,7 +155,7 @@ public class TestBlockDeletion {
   }
 
   private boolean verifyBlocksCreated(
-      List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoGroups)
+      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups)
       throws IOException {
     return performOperationOnKeyContainers((blockID) -> {
       try {
@@ -166,11 +166,11 @@ public class TestBlockDeletion {
       } catch (IOException e) {
         e.printStackTrace();
       }
-    }, ksmKeyLocationInfoGroups);
+    }, omKeyLocationInfoGroups);
   }
 
   private boolean verifyBlocksDeleted(
-      List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoGroups)
+      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups)
       throws IOException {
     return performOperationOnKeyContainers((blockID) -> {
       try {
@@ -186,19 +186,20 @@ public class TestBlockDeletion {
       } catch (IOException e) {
         e.printStackTrace();
       }
-    }, ksmKeyLocationInfoGroups);
+    }, omKeyLocationInfoGroups);
   }
 
   private boolean performOperationOnKeyContainers(Consumer<BlockID> consumer,
-      List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoGroups)
+      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups)
       throws IOException {
 
     try {
-      for (KsmKeyLocationInfoGroup ksmKeyLocationInfoGroup : ksmKeyLocationInfoGroups) {
-        List<KsmKeyLocationInfo> ksmKeyLocationInfos =
-            ksmKeyLocationInfoGroup.getLocationList();
-        for (KsmKeyLocationInfo ksmKeyLocationInfo : ksmKeyLocationInfos) {
-          BlockID blockID = ksmKeyLocationInfo.getBlockID();
+      for (OmKeyLocationInfoGroup omKeyLocationInfoGroup :
+          omKeyLocationInfoGroups) {
+        List<OmKeyLocationInfo> omKeyLocationInfos =
+            omKeyLocationInfoGroup.getLocationList();
+        for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfos) {
+          BlockID blockID = omKeyLocationInfo.getBlockID();
           consumer.accept(blockID);
         }
       }
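
For readers tracking the renamed helper types, a hedged sketch of walking the version and location hierarchy the way the updated test does (hypothetical code; it uses only the accessors visible in this diff):

    // Hypothetical sketch using the renamed helpers from
    // org.apache.hadoop.ozone.om.helpers.
    List<OmKeyLocationInfoGroup> versions = om.lookupKey(keyArgs).getKeyLocationVersions();
    for (OmKeyLocationInfoGroup group : versions) {
      for (OmKeyLocationInfo info : group.getLocationList()) {
        BlockID blockID = info.getBlockID();
        // act on each block, e.g. verify it exists (or was deleted) on the datanode
      }
    }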

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 3e514e7..58b831b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -34,8 +34,8 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
@@ -45,7 +45,6 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
 public class TestCloseContainerByPipeline {
@@ -98,17 +97,17 @@ public class TestCloseContainerByPipeline {
     key.close();
 
     //get the name of a valid container
-    KsmKeyArgs keyArgs =
-        new KsmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
+    OmKeyArgs keyArgs =
+        new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
             .setType(HddsProtos.ReplicationType.STAND_ALONE)
             .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024)
             .setKeyName("testCloseContainer").build();
 
-    KsmKeyLocationInfo ksmKeyLocationInfo =
-        cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions()
+    OmKeyLocationInfo omKeyLocationInfo =
+        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
             .get(0).getBlocksLatestVersionOnly().get(0);
 
-    long containerID = ksmKeyLocationInfo.getContainerID();
+    long containerID = omKeyLocationInfo.getContainerID();
     List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
         .getScmContainerManager().getContainerWithPipeline(containerID)
         .getPipeline().getMachines();
@@ -153,17 +152,17 @@ public class TestCloseContainerByPipeline {
     key.close();
 
     //get the name of a valid container
-    KsmKeyArgs keyArgs =
-        new KsmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
+    OmKeyArgs keyArgs =
+        new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
             .setType(HddsProtos.ReplicationType.STAND_ALONE)
             .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024)
             .setKeyName("standalone").build();
 
-    KsmKeyLocationInfo ksmKeyLocationInfo =
-        cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions()
+    OmKeyLocationInfo omKeyLocationInfo =
+        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
             .get(0).getBlocksLatestVersionOnly().get(0);
 
-    long containerID = ksmKeyLocationInfo.getContainerID();
+    long containerID = omKeyLocationInfo.getContainerID();
     List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
         .getScmContainerManager().getContainerWithPipeline(containerID)
         .getPipeline().getMachines();
@@ -207,16 +206,16 @@ public class TestCloseContainerByPipeline {
     key.close();
 
     //get the name of a valid container
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder().setVolumeName("test").
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName("test").
         setBucketName("test").setType(HddsProtos.ReplicationType.RATIS)
         .setFactor(HddsProtos.ReplicationFactor.THREE).setDataSize(1024)
         .setKeyName("ratis").build();
 
-    KsmKeyLocationInfo ksmKeyLocationInfo =
-        cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions()
+    OmKeyLocationInfo omKeyLocationInfo =
+        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
             .get(0).getBlocksLatestVersionOnly().get(0);
 
-    long containerID = ksmKeyLocationInfo.getContainerID();
+    long containerID = omKeyLocationInfo.getContainerID();
     List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
         .getScmContainerManager().getContainerWithPipeline(containerID)
         .getPipeline().getMachines();
@@ -232,7 +231,7 @@ public class TestCloseContainerByPipeline {
           .addDatanodeCommand(details.getUuid(),
               new CloseContainerCommand(containerID,
                   HddsProtos.ReplicationType.RATIS));
-  }
+    }
 
     for (DatanodeDetails datanodeDetails : datanodes) {
       GenericTestUtils.waitFor(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index efb7344..58a5154 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -28,8 +28,8 @@ import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB;
@@ -69,17 +69,17 @@ public class TestCloseContainerHandler {
     key.close();
 
     //get the name of a valid container
-    KsmKeyArgs keyArgs =
-        new KsmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
+    OmKeyArgs keyArgs =
+        new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
             .setType(HddsProtos.ReplicationType.STAND_ALONE)
             .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024)
             .setKeyName("test").build();
 
-    KsmKeyLocationInfo ksmKeyLocationInfo =
-        cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions()
+    OmKeyLocationInfo omKeyLocationInfo =
+        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
             .get(0).getBlocksLatestVersionOnly().get(0);
 
-    long containerID = ksmKeyLocationInfo.getContainerID();
+    long containerID = omKeyLocationInfo.getContainerID();
 
     Assert.assertFalse(isContainerClosed(cluster, containerID));
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
deleted file mode 100644
index 1cc7ff8..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.commons.lang3.RandomStringUtils;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.*;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * This class tests container report with DN container state info.
- */
-public class TestContainerReportWithKeys {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      TestContainerReportWithKeys.class);
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneConfiguration conf;
-  private static StorageContainerManager scm;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    scm = cluster.getStorageContainerManager();
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testContainerReportKeyWrite() throws Exception {
-    final String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    final String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    final String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    final int keySize = 100;
-
-    OzoneClient client = OzoneClientFactory.getClient(conf);
-    ObjectStore objectStore = client.getObjectStore();
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-    OzoneOutputStream key =
-        objectStore.getVolume(volumeName).getBucket(bucketName)
-            .createKey(keyName, keySize, ReplicationType.STAND_ALONE,
-                ReplicationFactor.ONE);
-    String dataString = RandomStringUtils.randomAlphabetic(keySize);
-    key.write(dataString.getBytes());
-    key.close();
-
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setType(HddsProtos.ReplicationType.STAND_ALONE)
-        .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(keySize)
-        .build();
-
-
-    KsmKeyLocationInfo keyInfo =
-        cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions()
-            .get(0).getBlocksLatestVersionOnly().get(0);
-
-    ContainerData cd = getContainerData(keyInfo.getContainerID());
-
-    LOG.info("DN Container Data:  keyCount: {} used: {} ",
-        cd.getKeyCount(), cd.getBytesUsed());
-
-    ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID());
-
-    LOG.info("SCM Container Info keyCount: {} usedBytes: {}",
-        cinfo.getNumberOfKeys(), cinfo.getUsedBytes());
-  }
-
-
-  private static ContainerData getContainerData(long containerID) {
-    ContainerData containerData;
-    try {
-      ContainerManager containerManager = cluster.getHddsDatanodes().get(0)
-          .getDatanodeStateMachine().getContainer().getContainerManager();
-      containerData = containerManager.readContainer(containerID);
-    } catch (StorageContainerException e) {
-      throw new AssertionError(e);
-    }
-    return containerData;
-  }
-}
\ No newline at end of file
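
The whole test file above is removed by this patch; its counterpart under the renamed om package is not shown in this excerpt. For orientation, a hedged sketch of the same key-to-container lookup written against the renamed types that do appear elsewhere in this patch (hypothetical code, placeholder values):

    // Hypothetical sketch; not the relocated test itself.
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setType(HddsProtos.ReplicationType.STAND_ALONE)
        .setFactor(HddsProtos.ReplicationFactor.ONE)
        .setDataSize(keySize)
        .build();
    OmKeyLocationInfo keyInfo =
        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
            .get(0).getBlocksLatestVersionOnly().get(0);
    ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID());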




[24/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java
deleted file mode 100644
index 6b42c27..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-
-/**
- * A class that encapsulates the KsmVolumeArgs Args.
- */
-public final class KsmVolumeArgs {
-  private final String adminName;
-  private final String ownerName;
-  private final String volume;
-  private final long creationTime;
-  private final long quotaInBytes;
-  private final Map<String, String> keyValueMap;
-  private final KsmOzoneAclMap aclMap;
-
-  /**
-   * Private constructor, constructed via builder.
-   * @param adminName  - Administrator's name.
-   * @param ownerName  - Volume owner's name
-   * @param volume - volume name
-   * @param quotaInBytes - Volume Quota in bytes.
-   * @param keyValueMap - keyValue map.
-   * @param aclMap - User to access rights map.
-   * @param creationTime - Volume creation time.
-   */
-  private KsmVolumeArgs(String adminName, String ownerName, String volume,
-                        long quotaInBytes, Map<String, String> keyValueMap,
-                        KsmOzoneAclMap aclMap, long creationTime) {
-    this.adminName = adminName;
-    this.ownerName = ownerName;
-    this.volume = volume;
-    this.quotaInBytes = quotaInBytes;
-    this.keyValueMap = keyValueMap;
-    this.aclMap = aclMap;
-    this.creationTime = creationTime;
-  }
-
-  /**
-   * Returns the Admin Name.
-   * @return String.
-   */
-  public String getAdminName() {
-    return adminName;
-  }
-
-  /**
-   * Returns the owner Name.
-   * @return String
-   */
-  public String getOwnerName() {
-    return ownerName;
-  }
-
-  /**
-   * Returns the volume Name.
-   * @return String
-   */
-  public String getVolume() {
-    return volume;
-  }
-
-  /**
-   * Returns creation time.
-   * @return long
-   */
-  public long getCreationTime() {
-    return creationTime;
-  }
-
-  /**
-   * Returns Quota in Bytes.
-   * @return long, Quota in bytes.
-   */
-  public long getQuotaInBytes() {
-    return quotaInBytes;
-  }
-
-  public Map<String, String> getKeyValueMap() {
-    return keyValueMap;
-  }
-
-  public KsmOzoneAclMap getAclMap() {
-    return aclMap;
-  }
-  /**
-   * Returns new builder class that builds a KsmVolumeArgs.
-   *
-   * @return Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder for KsmVolumeArgs.
-   */
-  public static class Builder {
-    private String adminName;
-    private String ownerName;
-    private String volume;
-    private long creationTime;
-    private long quotaInBytes;
-    private Map<String, String> keyValueMap;
-    private KsmOzoneAclMap aclMap;
-
-    /**
-     * Constructs a builder.
-     */
-    Builder() {
-      keyValueMap = new HashMap<>();
-      aclMap = new KsmOzoneAclMap();
-    }
-
-    public Builder setAdminName(String admin) {
-      this.adminName = admin;
-      return this;
-    }
-
-    public Builder setOwnerName(String owner) {
-      this.ownerName = owner;
-      return this;
-    }
-
-    public Builder setVolume(String volumeName) {
-      this.volume = volumeName;
-      return this;
-    }
-
-    public Builder setCreationTime(long createdOn) {
-      this.creationTime = createdOn;
-      return this;
-    }
-
-    public Builder setQuotaInBytes(long quota) {
-      this.quotaInBytes = quota;
-      return this;
-    }
-
-    public Builder addMetadata(String key, String value) {
-      keyValueMap.put(key, value); // overwrite if present.
-      return this;
-    }
-
-    public Builder addOzoneAcls(OzoneAclInfo acl) throws IOException {
-      aclMap.addAcl(acl);
-      return this;
-    }
-
-    /**
-     * Constructs a CreateVolumeArgument.
-     * @return CreateVolumeArgs.
-     */
-    public KsmVolumeArgs build() {
-      Preconditions.checkNotNull(adminName);
-      Preconditions.checkNotNull(ownerName);
-      Preconditions.checkNotNull(volume);
-      return new KsmVolumeArgs(adminName, ownerName, volume, quotaInBytes,
-          keyValueMap, aclMap, creationTime);
-    }
-  }
-
-  public VolumeInfo getProtobuf() {
-    List<KeyValue> metadataList = new LinkedList<>();
-    for (Map.Entry<String, String> entry : keyValueMap.entrySet()) {
-      metadataList.add(KeyValue.newBuilder().setKey(entry.getKey()).
-          setValue(entry.getValue()).build());
-    }
-    List<OzoneAclInfo> aclList = aclMap.ozoneAclGetProtobuf();
-
-    return VolumeInfo.newBuilder()
-        .setAdminName(adminName)
-        .setOwnerName(ownerName)
-        .setVolume(volume)
-        .setQuotaInBytes(quotaInBytes)
-        .addAllMetadata(metadataList)
-        .addAllVolumeAcls(aclList)
-        .setCreationTime(creationTime)
-        .build();
-  }
-
-  public static KsmVolumeArgs getFromProtobuf(VolumeInfo volInfo) {
-    Map<String, String> kvMap = volInfo.getMetadataList().stream()
-        .collect(Collectors.toMap(KeyValue::getKey,
-            KeyValue::getValue));
-    KsmOzoneAclMap aclMap =
-        KsmOzoneAclMap.ozoneAclGetFromProtobuf(volInfo.getVolumeAclsList());
-
-    return new KsmVolumeArgs(volInfo.getAdminName(), volInfo.getOwnerName(),
-        volInfo.getVolume(), volInfo.getQuotaInBytes(), kvMap, aclMap,
-        volInfo.getCreationTime());
-  }
-}
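
KsmVolumeArgs is deleted wholesale here; its renamed om-side equivalent is not shown in this excerpt. For reference, a hedged sketch of how the now-removed builder was used, relying only on members present in the file above (hypothetical code, placeholder values):

    // Hypothetical sketch of the removed API.
    KsmVolumeArgs args = KsmVolumeArgs.newBuilder()
        .setAdminName("admin")                      // required non-null by build()
        .setOwnerName("owner")                      // required non-null by build()
        .setVolume("volume1")                       // required non-null by build()
        .setQuotaInBytes(1024L * 1024L * 1024L)     // placeholder quota
        .setCreationTime(System.currentTimeMillis())
        .build();
    VolumeInfo proto = args.getProtobuf();          // protobuf round-trip
    KsmVolumeArgs copy = KsmVolumeArgs.getFromProtobuf(proto);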

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/OpenKeySession.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/OpenKeySession.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/OpenKeySession.java
deleted file mode 100644
index c19c04b..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/OpenKeySession.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-/**
- * This class represents a open key "session". A session here means a key is
- * opened by a specific client, the client sends the handler to server, such
- * that servers can recognize this client, and thus know how to close the key.
- */
-public class OpenKeySession {
-  private final int id;
-  private final KsmKeyInfo keyInfo;
-  // the version of the key when it is being opened in this session.
-  // a block that has a create version equals to open version means it will
-  // be committed only when this open session is closed.
-  private long openVersion;
-
-  public OpenKeySession(int id, KsmKeyInfo info, long version) {
-    this.id = id;
-    this.keyInfo = info;
-    this.openVersion = version;
-  }
-
-  public long getOpenVersion() {
-    return this.openVersion;
-  }
-
-  public KsmKeyInfo getKeyInfo() {
-    return keyInfo;
-  }
-
-  public int getId() {
-    return id;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java
deleted file mode 100644
index e07232d..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java
+++ /dev/null
@@ -1,237 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm.helpers;
-
-
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos
-    .ServicePort;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-/**
- * ServiceInfo holds the config details of Ozone services.
- */
-public final class ServiceInfo {
-
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(ServiceInfo.class);
-  private static final ObjectWriter WRITER =
-      new ObjectMapper().writerWithDefaultPrettyPrinter();
-
-  /**
-   * Type of node/service.
-   */
-  private NodeType nodeType;
-  /**
-   * Hostname of the node in which the service is running.
-   */
-  private String hostname;
-
-  /**
-   * List of ports the service listens to.
-   */
-  private Map<ServicePort.Type, Integer> ports;
-
-  /**
-   * Default constructor for JSON deserialization.
-   */
-  public ServiceInfo() {}
-
-  /**
-   * Constructs the ServiceInfo for the {@code nodeType}.
-   * @param nodeType type of node/service
-   * @param hostname hostname of the service
-   * @param portList list of ports the service listens to
-   */
-  private ServiceInfo(
-      NodeType nodeType, String hostname, List<ServicePort> portList) {
-    Preconditions.checkNotNull(nodeType);
-    Preconditions.checkNotNull(hostname);
-    this.nodeType = nodeType;
-    this.hostname = hostname;
-    this.ports = new HashMap<>();
-    for (ServicePort port : portList) {
-      ports.put(port.getType(), port.getValue());
-    }
-  }
-
-  /**
-   * Returns the type of node/service.
-   * @return node type
-   */
-  public NodeType getNodeType() {
-    return nodeType;
-  }
-
-  /**
-   * Returns the hostname of the service.
-   * @return hostname
-   */
-  public String getHostname() {
-    return hostname;
-  }
-
-  /**
-   * Returns ServicePort.Type to port mappings.
-   * @return ports
-   */
-  public Map<ServicePort.Type, Integer> getPorts() {
-    return ports;
-  }
-
-  /**
-   * Returns the port for given type, null if the service doesn't support
-   * the type.
-   *
-   * @param type the type of port.
-   *             ex: RPC, HTTP, HTTPS, etc..
-   */
-  @JsonIgnore
-  public int getPort(ServicePort.Type type) {
-    return ports.get(type);
-  }
-
-  /**
-   * Converts {@link ServiceInfo} to KeySpaceManagerProtocolProtos.ServiceInfo.
-   *
-   * @return KeySpaceManagerProtocolProtos.ServiceInfo
-   */
-  @JsonIgnore
-  public KeySpaceManagerProtocolProtos.ServiceInfo getProtobuf() {
-    KeySpaceManagerProtocolProtos.ServiceInfo.Builder builder =
-        KeySpaceManagerProtocolProtos.ServiceInfo.newBuilder();
-    builder.setNodeType(nodeType)
-        .setHostname(hostname)
-        .addAllServicePorts(
-            ports.entrySet().stream()
-                .map(
-                    entry ->
-                        ServicePort.newBuilder()
-                            .setType(entry.getKey())
-                            .setValue(entry.getValue()).build())
-                .collect(Collectors.toList()));
-    return builder.build();
-  }
-
-  /**
-   * Converts KeySpaceManagerProtocolProtos.ServiceInfo to {@link ServiceInfo}.
-   *
-   * @return {@link ServiceInfo}
-   */
-  @JsonIgnore
-  public static ServiceInfo getFromProtobuf(
-      KeySpaceManagerProtocolProtos.ServiceInfo serviceInfo) {
-    return new ServiceInfo(serviceInfo.getNodeType(),
-        serviceInfo.getHostname(),
-        serviceInfo.getServicePortsList());
-  }
-
-  /**
-   * Returns a JSON string of this object.
-   *
-   * @return String - json string
-   * @throws IOException
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * Parse a JSON string into ServiceInfo Object.
-   *
-   * @param jsonString Json String
-   * @return BucketInfo
-   * @throws IOException
-   */
-  public static BucketInfo parse(String jsonString) throws IOException {
-    return READER.readValue(jsonString);
-  }
-
-  /**
-   * Creates a new builder to build {@link ServiceInfo}.
-   * @return {@link ServiceInfo.Builder}
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder used to build/construct {@link ServiceInfo}.
-   */
-  public static class Builder {
-
-    private NodeType node;
-    private String host;
-    private List<ServicePort> portList = new ArrayList<>();
-
-
-    /**
-     * Sets the node/service type.
-     * @param nodeType type of node
-     * @return the builder
-     */
-    public Builder setNodeType(NodeType nodeType) {
-      node = nodeType;
-      return this;
-    }
-
-    /**
-     * Sets the hostname of the service.
-     * @param hostname service hostname
-     * @return the builder
-     */
-    public Builder setHostname(String hostname) {
-      host = hostname;
-      return this;
-    }
-
-    /**
-     * Adds the service port to the service port list.
-     * @param servicePort RPC port
-     * @return the builder
-     */
-    public Builder addServicePort(ServicePort servicePort) {
-      portList.add(servicePort);
-      return this;
-    }
-
-
-    /**
-     * Builds and returns {@link ServiceInfo} with the set values.
-     * @return {@link ServiceInfo}
-     */
-    public ServiceInfo build() {
-      return new ServiceInfo(node, host, portList);
-    }
-  }
-
-}
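
Likewise for the removed ServiceInfo helper, a hedged sketch of the builder and JSON serialization it offered (hypothetical code; the concrete NodeType and ServicePort values are assumptions, since their enum constants are not shown in this diff):

    // Hypothetical sketch of the removed API; nodeType and servicePort stand in
    // for an HddsProtos.NodeType and a KeySpaceManagerProtocolProtos.ServicePort
    // built elsewhere.
    ServiceInfo info = ServiceInfo.newBuilder()
        .setNodeType(nodeType)
        .setHostname("localhost")
        .addServicePort(servicePort)
        .build();
    String json = info.toJsonString();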

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/VolumeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/VolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/VolumeArgs.java
deleted file mode 100644
index 1a3d486..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/VolumeArgs.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import com.google.common.base.Preconditions;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * A class that encapsulates the createVolume Args.
- */
-public final class VolumeArgs {
-  private final String adminName;
-  private final String ownerName;
-  private final String volume;
-  private final long quotaInBytes;
-  private final Map<String, String> extendedAttributes;
-
-  /**
-   * Private constructor, constructed via builder.
-   *
-   * @param adminName - Administrator name.
-   * @param ownerName - Volume owner's name
-   * @param volume - volume name
-   * @param quotaInBytes - Volume Quota in bytes.
-   * @param keyValueMap - keyValue map.
-   */
-  private VolumeArgs(String adminName, String ownerName, String volume,
-      long quotaInBytes, Map<String, String> keyValueMap) {
-    this.adminName = adminName;
-    this.ownerName = ownerName;
-    this.volume = volume;
-    this.quotaInBytes = quotaInBytes;
-    this.extendedAttributes = keyValueMap;
-  }
-
-  /**
-   * Returns the Admin Name.
-   *
-   * @return String.
-   */
-  public String getAdminName() {
-    return adminName;
-  }
-
-  /**
-   * Returns the owner Name.
-   *
-   * @return String
-   */
-  public String getOwnerName() {
-    return ownerName;
-  }
-
-  /**
-   * Returns the volume Name.
-   *
-   * @return String
-   */
-  public String getVolume() {
-    return volume;
-  }
-
-  /**
-   * Returns Quota in Bytes.
-   *
-   * @return long, Quota in bytes.
-   */
-  public long getQuotaInBytes() {
-    return quotaInBytes;
-  }
-
-  public Map<String, String> getExtendedAttributes() {
-    return extendedAttributes;
-  }
-
-  static class Builder {
-    private String adminName;
-    private String ownerName;
-    private String volume;
-    private long quotaInBytes;
-    private Map<String, String> extendedAttributes;
-
-    /**
-     * Constructs a builder.
-     */
-    Builder() {
-      extendedAttributes = new HashMap<>();
-    }
-
-    public void setAdminName(String adminName) {
-      this.adminName = adminName;
-    }
-
-    public void setOwnerName(String ownerName) {
-      this.ownerName = ownerName;
-    }
-
-    public void setVolume(String volume) {
-      this.volume = volume;
-    }
-
-    public void setQuotaInBytes(long quotaInBytes) {
-      this.quotaInBytes = quotaInBytes;
-    }
-
-    public void addMetadata(String key, String value) {
-      extendedAttributes.put(key, value); // overwrite if present.
-    }
-
-    /**
-     * Constructs a CreateVolumeArgument.
-     *
-     * @return CreateVolumeArgs.
-     */
-    public VolumeArgs build() {
-      Preconditions.checkNotNull(adminName);
-      Preconditions.checkNotNull(ownerName);
-      Preconditions.checkNotNull(volume);
-      return new VolumeArgs(adminName, ownerName, volume, quotaInBytes,
-          extendedAttributes);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/package-info.java
deleted file mode 100644
index ce627a5..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java
deleted file mode 100644
index 7698ee1..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm;
-/**
- This package contains client side protocol library to communicate with KSM.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
deleted file mode 100644
index 54862d3..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.protocol;
-
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Protocol to talk to KSM.
- */
-public interface KeySpaceManagerProtocol {
-
-  /**
-   * Creates a volume.
-   * @param args - Arguments to create Volume.
-   * @throws IOException
-   */
-  void createVolume(KsmVolumeArgs args) throws IOException;
-
-  /**
-   * Changes the owner of a volume.
-   * @param volume  - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  void setOwner(String volume, String owner) throws IOException;
-
-  /**
-   * Changes the Quota on a volume.
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   * @throws IOException
-   */
-  void setQuota(String volume, long quota) throws IOException;
-
-  /**
-   * Checks if the specified user can access this volume.
-   * @param volume - volume
-   * @param userAcl - user acls which needs to be checked for access
-   * @return true if the user has required access for the volume,
-   *         false otherwise
-   * @throws IOException
-   */
-  boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
-      throws IOException;
-
-  /**
-   * Gets the volume information.
-   * @param volume - Volume name.
-   * @return VolumeArgs or exception is thrown.
-   * @throws IOException
-   */
-  KsmVolumeArgs getVolumeInfo(String volume) throws IOException;
-
-  /**
-   * Deletes an existing empty volume.
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  void deleteVolume(String volume) throws IOException;
-
-  /**
-   * Lists volumes owned by a specific user.
-   * @param userName - user name
-   * @param prefix  - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- List starts from the next from the prevkey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  List<KsmVolumeArgs> listVolumeByUser(String userName, String prefix, String
-      prevKey, int maxKeys) throws IOException;
-
-  /**
-   * Lists all volumes in the cluster.
-   * @param prefix  - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- List starts from the next from the prevkey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  List<KsmVolumeArgs> listAllVolumes(String prefix, String
-      prevKey, int maxKeys) throws IOException;
-
-  /**
-   * Creates a bucket.
-   * @param bucketInfo - BucketInfo to create Bucket.
-   * @throws IOException
-   */
-  void createBucket(KsmBucketInfo bucketInfo) throws IOException;
-
-  /**
-   * Gets the bucket information.
-   * @param volumeName - Volume name.
-   * @param bucketName - Bucket name.
-   * @return KsmBucketInfo or exception is thrown.
-   * @throws IOException
-   */
-  KsmBucketInfo getBucketInfo(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Sets bucket property from args.
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  void setBucketProperty(KsmBucketArgs args) throws IOException;
-
-  /**
-   * Open the given key and return an open key session.
-   *
-   * @param args the args of the key.
-   * @return OpenKeySession instance that client uses to talk to container.
-   * @throws IOException
-   */
-  OpenKeySession openKey(KsmKeyArgs args) throws IOException;
-
-  /**
-   * Commit a key. This will make the change from the client visible. The client
-   * is identified by the clientID.
-   *
-   * @param args the key to commit
-   * @param clientID the client identification
-   * @throws IOException
-   */
-  void commitKey(KsmKeyArgs args, int clientID) throws IOException;
-
-  /**
-   * Allocate a new block, it is assumed that the client is having an open key
-   * session going on. This block will be appended to this open key session.
-   *
-   * @param args the key to append
-   * @param clientID the client identification
-   * @return an allocated block
-   * @throws IOException
-   */
-  KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID)
-      throws IOException;
-
-  /**
-   * Look up for the container of an existing key.
-   *
-   * @param args the args of the key.
-   * @return KsmKeyInfo instance that client uses to talk to container.
-   * @throws IOException
-   */
-  KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException;
-
-  /**
-   * Rename an existing key within a bucket
-   * @param args the args of the key.
-   * @param toKeyName New name to be used for the Key
-   */
-  void renameKey(KsmKeyArgs args, String toKeyName) throws IOException;
-
-  /**
-   * Deletes an existing key.
-   *
-   * @param args the args of the key.
-   * @throws IOException
-   */
-  void deleteKey(KsmKeyArgs args) throws IOException;
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   * @param volume - Name of the volume.
-   * @param bucket - Name of the bucket.
-   * @throws IOException
-   */
-  void deleteBucket(String volume, String bucket) throws IOException;
-
-  /**
-   * Returns a list of buckets represented by {@link KsmBucketInfo}
-   * in the given volume. Argument volumeName is required, others
-   * are optional.
-   *
-   * @param volumeName
-   *   the name of the volume.
-   * @param startBucketName
-   *   the start bucket name, only the buckets whose name is
-   *   after this value will be included in the result.
-   * @param bucketPrefix
-   *   bucket name prefix, only the buckets whose name has
-   *   this prefix will be included in the result.
-   * @param maxNumOfBuckets
-   *   the maximum number of buckets to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of buckets.
-   * @throws IOException
-   */
-  List<KsmBucketInfo> listBuckets(String volumeName,
-      String startBucketName, String bucketPrefix, int maxNumOfBuckets)
-      throws IOException;
-
-  /**
-   * Returns a list of keys represented by {@link KsmKeyInfo}
-   * in the given bucket. Argument volumeName, bucketName is required,
-   * others are optional.
-   *
-   * @param volumeName
-   *   the name of the volume.
-   * @param bucketName
-   *   the name of the bucket.
-   * @param startKeyName
-   *   the start key name, only the keys whose name is
-   *   after this value will be included in the result.
-   * @param keyPrefix
-   *   key name prefix, only the keys whose name has
-   *   this prefix will be included in the result.
-   * @param maxKeys
-   *   the maximum number of keys to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of keys.
-   * @throws IOException
-   */
-  List<KsmKeyInfo> listKeys(String volumeName,
-      String bucketName, String startKeyName, String keyPrefix, int maxKeys)
-      throws IOException;
-
-  /**
-   * Returns list of Ozone services with its configuration details.
-   *
-   * @return list of Ozone services
-   * @throws IOException
-   */
-  List<ServiceInfo> getServiceList() throws IOException;
-}
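
For orientation: the removed KeySpaceManagerProtocol is a plain blocking interface, so a caller simply invokes its volume/bucket/key methods and handles IOException. The sketch below is illustrative only and not part of this change; the proxy, volume name, owner, and quota value are placeholders supplied by the caller.

import java.io.IOException;

import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.ksm.protocol.KeySpaceManagerProtocol;

/**
 * Illustrative sketch only: the call pattern a client would follow against
 * the removed KeySpaceManagerProtocol. The proxy and names are supplied by
 * the caller; nothing here is part of the patch.
 */
final class KsmVolumeCallsSketch {
  private KsmVolumeCallsSketch() {
  }

  static KsmVolumeArgs manageVolume(KeySpaceManagerProtocol ksm,
      KsmVolumeArgs args, String volumeName, String newOwner)
      throws IOException {
    ksm.createVolume(args);                    // create the volume described by args
    ksm.setOwner(volumeName, newOwner);        // transfer ownership
    ksm.setQuota(volumeName, 10L * 1024 * 1024 * 1024);  // quota is given in bytes
    return ksm.getVolumeInfo(volumeName);      // read the stored metadata back
  }
}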

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/package-info.java
deleted file mode 100644
index f77e5fd..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm.protocol;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java
deleted file mode 100644
index 0f38169..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java
+++ /dev/null
@@ -1,769 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.protocolPB;
-
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.ksm.protocol.KeySpaceManagerProtocol;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.AllocateBlockRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.AllocateBlockResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CommitKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CommitKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.BucketArgs;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetBucketPropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetBucketPropertyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.LocateKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.LocateKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.RenameKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.RenameKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetVolumePropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetVolumePropertyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CheckVolumeAccessRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CheckVolumeAccessResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListBucketsRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListBucketsResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListKeysRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListKeysResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.Status;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServiceListRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServiceListResponse;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.stream.Collectors;
-
-/**
- *  The client side implementation of KeySpaceManagerProtocol.
- */
-
-@InterfaceAudience.Private
-public final class KeySpaceManagerProtocolClientSideTranslatorPB
-    implements KeySpaceManagerProtocol, ProtocolTranslator, Closeable {
-
-  /**
-   * RpcController is not used and hence is set to null.
-   */
-  private static final RpcController NULL_RPC_CONTROLLER = null;
-
-  private final KeySpaceManagerProtocolPB rpcProxy;
-
-  /**
-   * Constructor for KeySpaceManager client.
-   * @param rpcProxy
-   */
-  public KeySpaceManagerProtocolClientSideTranslatorPB(
-      KeySpaceManagerProtocolPB rpcProxy) {
-    this.rpcProxy = rpcProxy;
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated
-   * with it. If the stream is already closed then invoking this
-   * method has no effect.
-   * <p>
-   * <p> As noted in {@link AutoCloseable#close()}, cases where the
-   * close may fail require careful attention. It is strongly advised
-   * to relinquish the underlying resources and to internally
-   * <em>mark</em> the {@code Closeable} as closed, prior to throwing
-   * the {@code IOException}.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void close() throws IOException {
-
-  }
-
-  /**
-   * Creates a volume.
-   *
-   * @param args - Arguments to create Volume.
-   * @throws IOException
-   */
-  @Override
-  public void createVolume(KsmVolumeArgs args) throws IOException {
-    CreateVolumeRequest.Builder req =
-        CreateVolumeRequest.newBuilder();
-    VolumeInfo volumeInfo = args.getProtobuf();
-    req.setVolumeInfo(volumeInfo);
-
-    final CreateVolumeResponse resp;
-    try {
-      resp = rpcProxy.createVolume(NULL_RPC_CONTROLLER,
-          req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-
-    if (resp.getStatus() != Status.OK) {
-      throw new
-          IOException("Volume creation failed, error:" + resp.getStatus());
-    }
-  }
-
-  /**
-   * Changes the owner of a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  @Override
-  public void setOwner(String volume, String owner) throws IOException {
-    SetVolumePropertyRequest.Builder req =
-        SetVolumePropertyRequest.newBuilder();
-    req.setVolumeName(volume).setOwnerName(owner);
-    final SetVolumePropertyResponse resp;
-    try {
-      resp = rpcProxy.setVolumeProperty(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new
-          IOException("Volume owner change failed, error:" + resp.getStatus());
-    }
-  }
-
-  /**
-   * Changes the Quota on a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   * @throws IOException
-   */
-  @Override
-  public void setQuota(String volume, long quota) throws IOException {
-    SetVolumePropertyRequest.Builder req =
-        SetVolumePropertyRequest.newBuilder();
-    req.setVolumeName(volume).setQuotaInBytes(quota);
-    final SetVolumePropertyResponse resp;
-    try {
-      resp = rpcProxy.setVolumeProperty(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new
-          IOException("Volume quota change failed, error:" + resp.getStatus());
-    }
-  }
-
-  /**
-   * Checks if the specified user can access this volume.
-   *
-   * @param volume - volume
-   * @param userAcl - user acls which needs to be checked for access
-   * @return true if the user has required access for the volume,
-   *         false otherwise
-   * @throws IOException
-   */
-  @Override
-  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) throws
-      IOException {
-    CheckVolumeAccessRequest.Builder req =
-        CheckVolumeAccessRequest.newBuilder();
-    req.setVolumeName(volume).setUserAcl(userAcl);
-    final CheckVolumeAccessResponse resp;
-    try {
-      resp = rpcProxy.checkVolumeAccess(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-
-    if (resp.getStatus() == Status.ACCESS_DENIED) {
-      return false;
-    } else if (resp.getStatus() == Status.OK) {
-      return true;
-    } else {
-      throw new
-          IOException("Check Volume Access failed, error:" + resp.getStatus());
-    }
-  }
-
-  /**
-   * Gets the volume information.
-   *
-   * @param volume - Volume name.
-   * @return KsmVolumeArgs or exception is thrown.
-   * @throws IOException
-   */
-  @Override
-  public KsmVolumeArgs getVolumeInfo(String volume) throws IOException {
-    InfoVolumeRequest.Builder req = InfoVolumeRequest.newBuilder();
-    req.setVolumeName(volume);
-    final InfoVolumeResponse resp;
-    try {
-      resp = rpcProxy.infoVolume(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new
-          IOException("Info Volume failed, error:" + resp.getStatus());
-    }
-    return KsmVolumeArgs.getFromProtobuf(resp.getVolumeInfo());
-  }
-
-  /**
-   * Deletes an existing empty volume.
-   *
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  @Override
-  public void deleteVolume(String volume) throws IOException {
-    DeleteVolumeRequest.Builder req = DeleteVolumeRequest.newBuilder();
-    req.setVolumeName(volume);
-    final DeleteVolumeResponse resp;
-    try {
-      resp = rpcProxy.deleteVolume(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new
-          IOException("Delete Volume failed, error:" + resp.getStatus());
-    }
-  }
-
-  /**
-   * Lists volumes owned by a specific user.
-   *
-   * @param userName - user name
-   * @param prefix - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- List starts from the next from the
-   * prevkey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  @Override
-  public List<KsmVolumeArgs> listVolumeByUser(String userName, String prefix,
-                                              String prevKey, int maxKeys)
-      throws IOException {
-    ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder();
-    if (!Strings.isNullOrEmpty(prefix)) {
-      builder.setPrefix(prefix);
-    }
-    if (!Strings.isNullOrEmpty(prevKey)) {
-      builder.setPrevKey(prevKey);
-    }
-    builder.setMaxKeys(maxKeys);
-    builder.setUserName(userName);
-    builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_USER);
-    return listVolume(builder.build());
-  }
-
-  /**
-   * Lists all volumes in the cluster.
-   *
-   * @param prefix - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- List starts from the next from the
-   * prevkey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  @Override
-  public List<KsmVolumeArgs> listAllVolumes(String prefix, String prevKey,
-      int maxKeys) throws IOException {
-    ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder();
-    if (!Strings.isNullOrEmpty(prefix)) {
-      builder.setPrefix(prefix);
-    }
-    if (!Strings.isNullOrEmpty(prevKey)) {
-      builder.setPrevKey(prevKey);
-    }
-    builder.setMaxKeys(maxKeys);
-    builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER);
-    return listVolume(builder.build());
-  }
-
-  private List<KsmVolumeArgs> listVolume(ListVolumeRequest request)
-      throws IOException {
-    final ListVolumeResponse resp;
-    try {
-      resp = rpcProxy.listVolumes(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("List volume failed, error: "
-          + resp.getStatus());
-    }
-
-    List<KsmVolumeArgs> result = Lists.newArrayList();
-    for (VolumeInfo volInfo : resp.getVolumeInfoList()) {
-      KsmVolumeArgs volArgs = KsmVolumeArgs.getFromProtobuf(volInfo);
-      result.add(volArgs);
-    }
-
-    return resp.getVolumeInfoList().stream()
-        .map(item -> KsmVolumeArgs.getFromProtobuf(item))
-        .collect(Collectors.toList());
-  }
-
-  /**
-   * Creates a bucket.
-   *
-   * @param bucketInfo - BucketInfo to create bucket.
-   * @throws IOException
-   */
-  @Override
-  public void createBucket(KsmBucketInfo bucketInfo) throws IOException {
-    CreateBucketRequest.Builder req =
-        CreateBucketRequest.newBuilder();
-    BucketInfo bucketInfoProtobuf = bucketInfo.getProtobuf();
-    req.setBucketInfo(bucketInfoProtobuf);
-
-    final CreateBucketResponse resp;
-    try {
-      resp = rpcProxy.createBucket(NULL_RPC_CONTROLLER,
-          req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Bucket creation failed, error: "
-          + resp.getStatus());
-    }
-  }
-
-  /**
-   * Gets the bucket information.
-   *
-   * @param volume - Volume name.
-   * @param bucket - Bucket name.
-   * @return KsmBucketInfo or exception is thrown.
-   * @throws IOException
-   */
-  @Override
-  public KsmBucketInfo getBucketInfo(String volume, String bucket)
-      throws IOException {
-    InfoBucketRequest.Builder req =
-        InfoBucketRequest.newBuilder();
-    req.setVolumeName(volume);
-    req.setBucketName(bucket);
-
-    final InfoBucketResponse resp;
-    try {
-      resp = rpcProxy.infoBucket(NULL_RPC_CONTROLLER,
-          req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() == Status.OK) {
-      return KsmBucketInfo.getFromProtobuf(resp.getBucketInfo());
-    } else {
-      throw new IOException("Info Bucket failed, error: "
-          + resp.getStatus());
-    }
-  }
-
-  /**
-   * Sets bucket property from args.
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  @Override
-  public void setBucketProperty(KsmBucketArgs args)
-      throws IOException {
-    SetBucketPropertyRequest.Builder req =
-        SetBucketPropertyRequest.newBuilder();
-    BucketArgs bucketArgs = args.getProtobuf();
-    req.setBucketArgs(bucketArgs);
-    final SetBucketPropertyResponse resp;
-    try {
-      resp = rpcProxy.setBucketProperty(NULL_RPC_CONTROLLER,
-          req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Setting bucket property failed, error: "
-          + resp.getStatus());
-    }
-  }
-
-  /**
-   * List buckets in a volume.
-   *
-   * @param volumeName
-   * @param startKey
-   * @param prefix
-   * @param count
-   * @return
-   * @throws IOException
-   */
-  @Override
-  public List<KsmBucketInfo> listBuckets(String volumeName,
-      String startKey, String prefix, int count) throws IOException {
-    List<KsmBucketInfo> buckets = new ArrayList<>();
-    ListBucketsRequest.Builder reqBuilder = ListBucketsRequest.newBuilder();
-    reqBuilder.setVolumeName(volumeName);
-    reqBuilder.setCount(count);
-    if (startKey != null) {
-      reqBuilder.setStartKey(startKey);
-    }
-    if (prefix != null) {
-      reqBuilder.setPrefix(prefix);
-    }
-    ListBucketsRequest request = reqBuilder.build();
-    final ListBucketsResponse resp;
-    try {
-      resp = rpcProxy.listBuckets(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-
-    if (resp.getStatus() == Status.OK) {
-      buckets.addAll(
-          resp.getBucketInfoList().stream()
-              .map(KsmBucketInfo::getFromProtobuf)
-              .collect(Collectors.toList()));
-      return buckets;
-    } else {
-      throw new IOException("List Buckets failed, error: "
-          + resp.getStatus());
-    }
-  }
-
-  /**
-   * Creates a new open session for the key; the returned metadata is then
-   * used to talk to the data nodes and actually write the key.
-   * @param args the args for the key to be allocated
-   * @return an OpenKeySession handle that the client uses to write the key
-   * @throws IOException
-   */
-  @Override
-  public OpenKeySession openKey(KsmKeyArgs args) throws IOException {
-    LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
-    KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setFactor(args.getFactor())
-        .setType(args.getType())
-        .setKeyName(args.getKeyName());
-    if (args.getDataSize() > 0) {
-      keyArgs.setDataSize(args.getDataSize());
-    }
-    req.setKeyArgs(keyArgs.build());
-
-    final LocateKeyResponse resp;
-    try {
-      resp = rpcProxy.createKey(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Create key failed, error:" + resp.getStatus());
-    }
-    return new OpenKeySession(resp.getID(),
-        KsmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion());
-  }
-
-  @Override
-  public KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID)
-      throws IOException {
-    AllocateBlockRequest.Builder req = AllocateBlockRequest.newBuilder();
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getDataSize()).build();
-    req.setKeyArgs(keyArgs);
-    req.setClientID(clientID);
-
-    final AllocateBlockResponse resp;
-    try {
-      resp = rpcProxy.allocateBlock(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Allocate block failed, error:" +
-          resp.getStatus());
-    }
-    return KsmKeyLocationInfo.getFromProtobuf(resp.getKeyLocation());
-  }
-
-  @Override
-  public void commitKey(KsmKeyArgs args, int clientID)
-      throws IOException {
-    CommitKeyRequest.Builder req = CommitKeyRequest.newBuilder();
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getDataSize()).build();
-    req.setKeyArgs(keyArgs);
-    req.setClientID(clientID);
-
-    final CommitKeyResponse resp;
-    try {
-      resp = rpcProxy.commitKey(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Commit key failed, error:" +
-          resp.getStatus());
-    }
-  }
-
-
-  @Override
-  public KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException {
-    LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getDataSize()).build();
-    req.setKeyArgs(keyArgs);
-
-    final LocateKeyResponse resp;
-    try {
-      resp = rpcProxy.lookupKey(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Lookup key failed, error:" +
-          resp.getStatus());
-    }
-    return KsmKeyInfo.getFromProtobuf(resp.getKeyInfo());
-  }
-
-  @Override
-  public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException {
-    RenameKeyRequest.Builder req = RenameKeyRequest.newBuilder();
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getDataSize()).build();
-    req.setKeyArgs(keyArgs);
-    req.setToKeyName(toKeyName);
-
-    final RenameKeyResponse resp;
-    try {
-      resp = rpcProxy.renameKey(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Rename key failed, error:" +
-          resp.getStatus());
-    }
-  }
-
-  /**
-   * Deletes an existing key.
-   *
-   * @param args the args of the key.
-   * @throws IOException
-   */
-  @Override
-  public void deleteKey(KsmKeyArgs args) throws IOException {
-    LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName()).build();
-    req.setKeyArgs(keyArgs);
-
-    final LocateKeyResponse resp;
-    try {
-      resp = rpcProxy.deleteKey(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Delete key failed, error:" +
-          resp.getStatus());
-    }
-  }
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   * @param volume - Name of the volume.
-   * @param bucket - Name of the bucket.
-   * @throws IOException
-   */
-  public void deleteBucket(String volume, String bucket) throws IOException {
-    DeleteBucketRequest.Builder req = DeleteBucketRequest.newBuilder();
-    req.setVolumeName(volume);
-    req.setBucketName(bucket);
-    final DeleteBucketResponse resp;
-    try {
-      resp = rpcProxy.deleteBucket(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new
-          IOException("Delete Bucket failed, error:" + resp.getStatus());
-    }
-  }
-
-  /**
-   * List keys in a bucket.
-   */
-  @Override
-  public List<KsmKeyInfo> listKeys(String volumeName, String bucketName,
-      String startKey, String prefix, int maxKeys) throws IOException {
-    List<KsmKeyInfo> keys = new ArrayList<>();
-    ListKeysRequest.Builder reqBuilder = ListKeysRequest.newBuilder();
-    reqBuilder.setVolumeName(volumeName);
-    reqBuilder.setBucketName(bucketName);
-    reqBuilder.setCount(maxKeys);
-
-    if (startKey != null) {
-      reqBuilder.setStartKey(startKey);
-    }
-
-    if (prefix != null) {
-      reqBuilder.setPrefix(prefix);
-    }
-
-    ListKeysRequest request = reqBuilder.build();
-    final ListKeysResponse resp;
-    try {
-      resp = rpcProxy.listKeys(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-
-    if (resp.getStatus() == Status.OK) {
-      keys.addAll(
-          resp.getKeyInfoList().stream()
-              .map(KsmKeyInfo::getFromProtobuf)
-              .collect(Collectors.toList()));
-      return keys;
-    } else {
-      throw new IOException("List Keys failed, error: "
-          + resp.getStatus());
-    }
-  }
-
-  @Override
-  public List<ServiceInfo> getServiceList() throws IOException {
-    ServiceListRequest request = ServiceListRequest.newBuilder().build();
-    final ServiceListResponse resp;
-    try {
-      resp = rpcProxy.getServiceList(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-
-    if (resp.getStatus() == Status.OK) {
-      return resp.getServiceInfoList().stream()
-              .map(ServiceInfo::getFromProtobuf)
-              .collect(Collectors.toList());
-    } else {
-      throw new IOException("Getting service list failed, error: "
-          + resp.getStatus());
-    }
-  }
-
-  /**
-   * Return the proxy object underlying this protocol translator.
-   *
-   * @return the proxy object underlying this protocol translator.
-   */
-  @Override
-  public Object getUnderlyingProxyObject() {
-    return null;
-  }
-}
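
Every method of the removed translator repeats the same idiom: build the protobuf request, call the blocking proxy, unwrap ServiceException via ProtobufHelper, then fail on any Status other than OK. The sketch below only restates that idiom in one place; RpcCall is a hypothetical helper interface introduced for illustration, not something defined by this patch.

import java.io.IOException;

import com.google.protobuf.ServiceException;

import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.Status;

/**
 * Illustrative sketch of the request/response handling pattern used by the
 * removed KeySpaceManagerProtocolClientSideTranslatorPB.
 */
final class KsmRpcIdiomSketch {

  /** Hypothetical functional interface wrapping one blocking protobuf call. */
  interface RpcCall<R> {
    R call() throws ServiceException;
  }

  private KsmRpcIdiomSketch() {
  }

  static <R> R invoke(RpcCall<R> call) throws IOException {
    try {
      return call.call();                          // blocking protobuf RPC
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);  // unwrap the remote IOException
    }
  }

  static void checkOk(Status status, String op) throws IOException {
    if (status != Status.OK) {
      throw new IOException(op + " failed, error:" + status);
    }
  }
}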

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
deleted file mode 100644
index 8acca8a..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.protocolPB;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.KeySpaceManagerService;
-
-/**
- * Protocol used to communicate with KSM.
- */
-@ProtocolInfo(protocolName =
-    "org.apache.hadoop.ozone.protocol.KeySpaceManagerProtocol",
-    protocolVersion = 1)
-@InterfaceAudience.Private
-public interface KeySpaceManagerProtocolPB
-    extends KeySpaceManagerService.BlockingInterface {
-}
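
The PB interface above only tags the protobuf service with a protocol name and version; a concrete proxy is normally obtained through Hadoop's RPC layer. The sketch below shows one plausible wiring and is not part of this change; the address argument and the simple RPC.getProxy overload are assumptions, and real client code may pass additional security parameters.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB;

/**
 * Illustrative sketch: obtaining a KeySpaceManagerProtocolPB proxy with the
 * standard Hadoop RPC machinery.
 */
final class KsmRpcClientSketch {
  private KsmRpcClientSketch() {
  }

  static KeySpaceManagerProtocolPB connect(Configuration conf,
      InetSocketAddress ksmAddress) throws IOException {
    // Use the protobuf engine for this protocol class.
    RPC.setProtocolEngine(conf, KeySpaceManagerProtocolPB.class,
        ProtobufRpcEngine.class);
    long version = RPC.getProtocolVersion(KeySpaceManagerProtocolPB.class);
    return RPC.getProxy(KeySpaceManagerProtocolPB.class, version,
        ksmAddress, conf);
  }
}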

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java
deleted file mode 100644
index 67f9f7b..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm.protocolPB;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
new file mode 100644
index 0000000..b9ca296
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.ozone.OzoneAcl;
+/**
+ * Ozone Manager Constants.
+ */
+public final class OMConfigKeys {
+  /**
+   * Never constructed.
+   */
+  private OMConfigKeys() {
+  }
+
+
+  public static final String OZONE_OM_HANDLER_COUNT_KEY =
+      "ozone.om.handler.count.key";
+  public static final int OZONE_OM_HANDLER_COUNT_DEFAULT = 20;
+
+  public static final String OZONE_OM_ADDRESS_KEY =
+      "ozone.om.address";
+  public static final String OZONE_OM_BIND_HOST_DEFAULT =
+      "0.0.0.0";
+  public static final int OZONE_OM_PORT_DEFAULT = 9862;
+
+  public static final String OZONE_OM_HTTP_ENABLED_KEY =
+      "ozone.om.http.enabled";
+  public static final String OZONE_OM_HTTP_BIND_HOST_KEY =
+      "ozone.om.http-bind-host";
+  public static final String OZONE_OM_HTTPS_BIND_HOST_KEY =
+      "ozone.om.https-bind-host";
+  public static final String OZONE_OM_HTTP_ADDRESS_KEY =
+      "ozone.om.http-address";
+  public static final String OZONE_OM_HTTPS_ADDRESS_KEY =
+      "ozone.om.https-address";
+  public static final String OZONE_OM_KEYTAB_FILE =
+      "ozone.om.keytab.file";
+  public static final String OZONE_OM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
+  public static final int OZONE_OM_HTTP_BIND_PORT_DEFAULT = 9874;
+  public static final int OZONE_OM_HTTPS_BIND_PORT_DEFAULT = 9875;
+
+  // Size of the off-heap LevelDB cache used by the OM database, 128 MB by default.
+  public static final String OZONE_OM_DB_CACHE_SIZE_MB =
+      "ozone.om.db.cache.size.mb";
+  public static final int OZONE_OM_DB_CACHE_SIZE_DEFAULT = 128;
+
+  public static final String OZONE_OM_USER_MAX_VOLUME =
+      "ozone.om.user.max.volume";
+  public static final int OZONE_OM_USER_MAX_VOLUME_DEFAULT = 1024;
+
+  // OM Default user/group permissions
+  public static final String OZONE_OM_USER_RIGHTS =
+      "ozone.om.user.rights";
+  public static final OzoneAcl.OzoneACLRights OZONE_OM_USER_RIGHTS_DEFAULT =
+      OzoneAcl.OzoneACLRights.READ_WRITE;
+
+  public static final String OZONE_OM_GROUP_RIGHTS =
+      "ozone.om.group.rights";
+  public static final OzoneAcl.OzoneACLRights OZONE_OM_GROUP_RIGHTS_DEFAULT =
+      OzoneAcl.OzoneACLRights.READ_WRITE;
+
+  public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
+      "ozone.key.deleting.limit.per.task";
+  public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
+}
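
The new constants are consumed through an ordinary Hadoop Configuration lookup. The sketch below is only an example of that usage; the class name and the printout are placeholders, not code added by this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.om.OMConfigKeys;

/**
 * Illustrative sketch: reading the OM settings defined above from a
 * Configuration, falling back to the documented defaults.
 */
final class OmConfigReadSketch {
  private OmConfigReadSketch() {
  }

  static void printOmSettings(Configuration conf) {
    int handlers = conf.getInt(
        OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY,
        OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEFAULT);   // 20 unless overridden
    int cacheMb = conf.getInt(
        OMConfigKeys.OZONE_OM_DB_CACHE_SIZE_MB,
        OMConfigKeys.OZONE_OM_DB_CACHE_SIZE_DEFAULT);   // 128 MB LevelDB cache
    System.out.println("OM handlers=" + handlers
        + ", db cache=" + cacheMb + " MB");
  }
}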

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
new file mode 100644
index 0000000..6aabfef
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
@@ -0,0 +1,233 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.BucketArgs;
+import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
+
+/**
+ * A class that encapsulates Bucket Arguments.
+ */
+public final class OmBucketArgs {
+  /**
+   * Name of the volume to which the bucket belongs.
+   */
+  private final String volumeName;
+  /**
+   * Name of the bucket.
+   */
+  private final String bucketName;
+  /**
+   * ACL's that are to be added for the bucket.
+   */
+  private List<OzoneAcl> addAcls;
+  /**
+   * ACL's that are to be removed from the bucket.
+   */
+  private List<OzoneAcl> removeAcls;
+  /**
+   * Bucket Version flag.
+   */
+  private Boolean isVersionEnabled;
+  /**
+   * Type of storage to be used for this bucket.
+   * [RAM_DISK, SSD, DISK, ARCHIVE]
+   */
+  private StorageType storageType;
+
+  /**
+   * Private constructor, constructed via builder.
+   * @param volumeName - Volume name.
+   * @param bucketName - Bucket name.
+   * @param addAcls - ACL's to be added.
+   * @param removeAcls - ACL's to be removed.
+   * @param isVersionEnabled - Bucket version flag.
+   * @param storageType - Storage type to be used.
+   */
+  private OmBucketArgs(String volumeName, String bucketName,
+                       List<OzoneAcl> addAcls, List<OzoneAcl> removeAcls,
+                       Boolean isVersionEnabled, StorageType storageType) {
+    this.volumeName = volumeName;
+    this.bucketName = bucketName;
+    this.addAcls = addAcls;
+    this.removeAcls = removeAcls;
+    this.isVersionEnabled = isVersionEnabled;
+    this.storageType = storageType;
+  }
+
+  /**
+   * Returns the Volume Name.
+   * @return String.
+   */
+  public String getVolumeName() {
+    return volumeName;
+  }
+
+  /**
+   * Returns the Bucket Name.
+   * @return String
+   */
+  public String getBucketName() {
+    return bucketName;
+  }
+
+  /**
+   * Returns the ACL's that are to be added.
+   * @return List<OzoneAcl>
+   */
+  public List<OzoneAcl> getAddAcls() {
+    return addAcls;
+  }
+
+  /**
+   * Returns the ACL's that are to be removed.
+   * @return List<OzoneAcl>
+   */
+  public List<OzoneAcl> getRemoveAcls() {
+    return removeAcls;
+  }
+
+  /**
+   * Returns true if bucket version is enabled, else false.
+   * @return isVersionEnabled
+   */
+  public Boolean getIsVersionEnabled() {
+    return isVersionEnabled;
+  }
+
+  /**
+   * Returns the type of storage to be used.
+   * @return StorageType
+   */
+  public StorageType getStorageType() {
+    return storageType;
+  }
+
+  /**
+   * Returns a new builder that builds an OmBucketArgs.
+   *
+   * @return Builder
+   */
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  /**
+   * Builder for OmBucketArgs.
+   */
+  public static class Builder {
+    private String volumeName;
+    private String bucketName;
+    private List<OzoneAcl> addAcls;
+    private List<OzoneAcl> removeAcls;
+    private Boolean isVersionEnabled;
+    private StorageType storageType;
+
+    public Builder setVolumeName(String volume) {
+      this.volumeName = volume;
+      return this;
+    }
+
+    public Builder setBucketName(String bucket) {
+      this.bucketName = bucket;
+      return this;
+    }
+
+    public Builder setAddAcls(List<OzoneAcl> acls) {
+      this.addAcls = acls;
+      return this;
+    }
+
+    public Builder setRemoveAcls(List<OzoneAcl> acls) {
+      this.removeAcls = acls;
+      return this;
+    }
+
+    public Builder setIsVersionEnabled(Boolean versionFlag) {
+      this.isVersionEnabled = versionFlag;
+      return this;
+    }
+
+    public Builder setStorageType(StorageType storage) {
+      this.storageType = storage;
+      return this;
+    }
+
+    /**
+     * Constructs the OmBucketArgs.
+     * @return instance of OmBucketArgs.
+     */
+    public OmBucketArgs build() {
+      Preconditions.checkNotNull(volumeName);
+      Preconditions.checkNotNull(bucketName);
+      return new OmBucketArgs(volumeName, bucketName, addAcls,
+          removeAcls, isVersionEnabled, storageType);
+    }
+  }
+
+  /**
+   * Creates BucketArgs protobuf from OmBucketArgs.
+   */
+  public BucketArgs getProtobuf() {
+    BucketArgs.Builder builder = BucketArgs.newBuilder();
+    builder.setVolumeName(volumeName)
+        .setBucketName(bucketName);
+    if(addAcls != null && !addAcls.isEmpty()) {
+      builder.addAllAddAcls(addAcls.stream().map(
+          OMPBHelper::convertOzoneAcl).collect(Collectors.toList()));
+    }
+    if(removeAcls != null && !removeAcls.isEmpty()) {
+      builder.addAllRemoveAcls(removeAcls.stream().map(
+          OMPBHelper::convertOzoneAcl).collect(Collectors.toList()));
+    }
+    if(isVersionEnabled != null) {
+      builder.setIsVersionEnabled(isVersionEnabled);
+    }
+    if(storageType != null) {
+      builder.setStorageType(
+          PBHelperClient.convertStorageType(storageType));
+    }
+    return builder.build();
+  }
+
+  /**
+   * Parses BucketInfo protobuf and creates OmBucketArgs.
+   * @param bucketArgs
+   * @return instance of OmBucketArgs
+   */
+  public static OmBucketArgs getFromProtobuf(BucketArgs bucketArgs) {
+    return new OmBucketArgs(bucketArgs.getVolumeName(),
+        bucketArgs.getBucketName(),
+        bucketArgs.getAddAclsList().stream().map(
+            OMPBHelper::convertOzoneAcl).collect(Collectors.toList()),
+        bucketArgs.getRemoveAclsList().stream().map(
+            OMPBHelper::convertOzoneAcl).collect(Collectors.toList()),
+        bucketArgs.hasIsVersionEnabled() ?
+            bucketArgs.getIsVersionEnabled() : null,
+        bucketArgs.hasStorageType() ? PBHelperClient.convertStorageType(
+            bucketArgs.getStorageType()) : null);
+  }
+}
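
OmBucketArgs is built through its Builder (volume and bucket names are mandatory, everything else optional) and converted to and from the BucketArgs protobuf for the wire. Below is a small usage sketch that relies only on the API shown above; the literals and the supplied ACL are placeholders.

import java.util.Collections;

import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs;

/**
 * Illustrative sketch: round-tripping OmBucketArgs through its builder and
 * its protobuf form.
 */
final class OmBucketArgsSketch {
  private OmBucketArgsSketch() {
  }

  static OmBucketArgs example(OzoneAcl acl) {
    OmBucketArgs args = OmBucketArgs.newBuilder()
        .setVolumeName("vol1")                        // required
        .setBucketName("bucket1")                     // required
        .setAddAcls(Collections.singletonList(acl))   // optional
        .setIsVersionEnabled(Boolean.TRUE)            // optional
        .build();
    BucketArgs proto = args.getProtobuf();            // wire form
    return OmBucketArgs.getFromProtobuf(proto);       // and back again
  }
}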

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
new file mode 100644
index 0000000..bf5abdd
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
@@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * A class that encapsulates Bucket Info.
+ */
+public final class OmBucketInfo {
+  /**
+   * Name of the volume to which the bucket belongs.
+   */
+  private final String volumeName;
+  /**
+   * Name of the bucket.
+   */
+  private final String bucketName;
+  /**
+   * ACL Information.
+   */
+  private List<OzoneAcl> acls;
+  /**
+   * Bucket Version flag.
+   */
+  private Boolean isVersionEnabled;
+  /**
+   * Type of storage to be used for this bucket.
+   * [RAM_DISK, SSD, DISK, ARCHIVE]
+   */
+  private StorageType storageType;
+  /**
+   * Creation time of bucket.
+   */
+  private final long creationTime;
+
+  /**
+   * Private constructor, constructed via builder.
+   * @param volumeName - Volume name.
+   * @param bucketName - Bucket name.
+   * @param acls - list of ACLs.
+   * @param isVersionEnabled - Bucket version flag.
+   * @param storageType - Storage type to be used.
+   * @param creationTime - Bucket creation time.
+   */
+  private OmBucketInfo(String volumeName, String bucketName,
+                       List<OzoneAcl> acls, boolean isVersionEnabled,
+                       StorageType storageType, long creationTime) {
+    this.volumeName = volumeName;
+    this.bucketName = bucketName;
+    this.acls = acls;
+    this.isVersionEnabled = isVersionEnabled;
+    this.storageType = storageType;
+    this.creationTime = creationTime;
+  }
+
+  /**
+   * Returns the Volume Name.
+   * @return String.
+   */
+  public String getVolumeName() {
+    return volumeName;
+  }
+
+  /**
+   * Returns the Bucket Name.
+   * @return String
+   */
+  public String getBucketName() {
+    return bucketName;
+  }
+
+  /**
+   * Returns the ACL's associated with this bucket.
+   * @return List<OzoneAcl>
+   */
+  public List<OzoneAcl> getAcls() {
+    return acls;
+  }
+
+  /**
+   * Returns true if bucket version is enabled, else false.
+   * @return isVersionEnabled
+   */
+  public boolean getIsVersionEnabled() {
+    return isVersionEnabled;
+  }
+
+  /**
+   * Returns the type of storage to be used.
+   * @return StorageType
+   */
+  public StorageType getStorageType() {
+    return storageType;
+  }
+
+  /**
+   * Returns creation time.
+   *
+   * @return long
+   */
+  public long getCreationTime() {
+    return creationTime;
+  }
+
+  /**
+   * Returns a new builder that builds an OmBucketInfo.
+   *
+   * @return Builder
+   */
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  /**
+   * Builder for OmBucketInfo.
+   */
+  public static class Builder {
+    private String volumeName;
+    private String bucketName;
+    private List<OzoneAcl> acls;
+    private Boolean isVersionEnabled;
+    private StorageType storageType;
+    private long creationTime;
+
+    Builder() {
+      //Default values
+      this.acls = new LinkedList<>();
+      this.isVersionEnabled = false;
+      this.storageType = StorageType.DISK;
+    }
+
+    public Builder setVolumeName(String volume) {
+      this.volumeName = volume;
+      return this;
+    }
+
+    public Builder setBucketName(String bucket) {
+      this.bucketName = bucket;
+      return this;
+    }
+
+    public Builder setAcls(List<OzoneAcl> listOfAcls) {
+      this.acls = listOfAcls;
+      return this;
+    }
+
+    public Builder setIsVersionEnabled(Boolean versionFlag) {
+      this.isVersionEnabled = versionFlag;
+      return this;
+    }
+
+    public Builder setStorageType(StorageType storage) {
+      this.storageType = storage;
+      return this;
+    }
+
+    public Builder setCreationTime(long createdOn) {
+      this.creationTime = createdOn;
+      return this;
+    }
+
+    /**
+     * Constructs the OmBucketInfo.
+     * @return instance of OmBucketInfo.
+     */
+    public OmBucketInfo build() {
+      Preconditions.checkNotNull(volumeName);
+      Preconditions.checkNotNull(bucketName);
+      Preconditions.checkNotNull(acls);
+      Preconditions.checkNotNull(isVersionEnabled);
+      Preconditions.checkNotNull(storageType);
+
+      return new OmBucketInfo(volumeName, bucketName, acls,
+          isVersionEnabled, storageType, creationTime);
+    }
+  }
+
+  /**
+   * Creates BucketInfo protobuf from OmBucketInfo.
+   */
+  public BucketInfo getProtobuf() {
+    return BucketInfo.newBuilder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .addAllAcls(acls.stream().map(
+            OMPBHelper::convertOzoneAcl).collect(Collectors.toList()))
+        .setIsVersionEnabled(isVersionEnabled)
+        .setStorageType(PBHelperClient.convertStorageType(
+            storageType))
+        .setCreationTime(creationTime)
+        .build();
+  }
+
+  /**
+   * Parses BucketInfo protobuf and creates OmBucketInfo.
+   * @param bucketInfo - BucketInfo protobuf message.
+   * @return instance of OmBucketInfo
+   */
+  public static OmBucketInfo getFromProtobuf(BucketInfo bucketInfo) {
+    return new OmBucketInfo(
+        bucketInfo.getVolumeName(),
+        bucketInfo.getBucketName(),
+        bucketInfo.getAclsList().stream().map(
+            OMPBHelper::convertOzoneAcl).collect(Collectors.toList()),
+        bucketInfo.getIsVersionEnabled(),
+        PBHelperClient.convertStorageType(
+            bucketInfo.getStorageType()), bucketInfo.getCreationTime());
+  }
+}
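
Below is a minimal, illustrative sketch (not part of the patch) of how the builder and the protobuf round-trip above are meant to be used; all names and values here are made up for the example.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;

public class OmBucketInfoExample {
  public static void main(String[] args) {
    // Build an OmBucketInfo with illustrative values (not taken from the patch).
    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
        .setVolumeName("volumeOne")
        .setBucketName("bucketOne")
        .setStorageType(StorageType.SSD)
        .setIsVersionEnabled(true)
        .setCreationTime(System.currentTimeMillis())
        .build();

    // Round-trip through the protobuf representation used on the wire.
    BucketInfo proto = bucketInfo.getProtobuf();
    OmBucketInfo fromProto = OmBucketInfo.getFromProtobuf(proto);
    assert fromProto.getBucketName().equals(bucketInfo.getBucketName());
  }
}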

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
new file mode 100644
index 0000000..1f8ed5f
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+
+/**
+ * Args for a key. Clients use this to specify a key's attributes on key
+ * creation (putKey()).
+ */
+public final class OmKeyArgs {
+  private final String volumeName;
+  private final String bucketName;
+  private final String keyName;
+  private long dataSize;
+  private final ReplicationType type;
+  private final ReplicationFactor factor;
+
+  private OmKeyArgs(String volumeName, String bucketName, String keyName,
+                    long dataSize, ReplicationType type, ReplicationFactor factor) {
+    this.volumeName = volumeName;
+    this.bucketName = bucketName;
+    this.keyName = keyName;
+    this.dataSize = dataSize;
+    this.type = type;
+    this.factor = factor;
+  }
+
+  public ReplicationType getType() {
+    return type;
+  }
+
+  public ReplicationFactor getFactor() {
+    return factor;
+  }
+
+  public String getVolumeName() {
+    return volumeName;
+  }
+
+  public String getBucketName() {
+    return bucketName;
+  }
+
+  public String getKeyName() {
+    return keyName;
+  }
+
+  public long getDataSize() {
+    return dataSize;
+  }
+
+  public void setDataSize(long size) {
+    dataSize = size;
+  }
+
+  /**
+   * Builder class of OmKeyArgs.
+   */
+  public static class Builder {
+    private String volumeName;
+    private String bucketName;
+    private String keyName;
+    private long dataSize;
+    private ReplicationType type;
+    private ReplicationFactor factor;
+
+
+    public Builder setVolumeName(String volume) {
+      this.volumeName = volume;
+      return this;
+    }
+
+    public Builder setBucketName(String bucket) {
+      this.bucketName = bucket;
+      return this;
+    }
+
+    public Builder setKeyName(String key) {
+      this.keyName = key;
+      return this;
+    }
+
+    public Builder setDataSize(long size) {
+      this.dataSize = size;
+      return this;
+    }
+
+    public Builder setType(ReplicationType replicationType) {
+      this.type = replicationType;
+      return this;
+    }
+
+    public Builder setFactor(ReplicationFactor replicationFactor) {
+      this.factor = replicationFactor;
+      return this;
+    }
+
+    public OmKeyArgs build() {
+      return new OmKeyArgs(volumeName, bucketName, keyName, dataSize,
+          type, factor);
+    }
+  }
+}
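
A minimal usage sketch of OmKeyArgs.Builder, again with illustrative values rather than anything from the patch; it mirrors how key-creation callers populate the args before handing them to the Ozone Manager.

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;

public class OmKeyArgsExample {
  public static void main(String[] args) {
    // Describe the key to be created; names and sizes are illustrative only.
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName("volumeOne")
        .setBucketName("bucketOne")
        .setKeyName("keyOne")
        .setDataSize(1024)
        .setType(ReplicationType.STAND_ALONE)
        .setFactor(ReplicationFactor.ONE)
        .build();

    // Data size is the only mutable field and can be adjusted later,
    // e.g. once the actual written length is known.
    keyArgs.setDataSize(2048);
  }
}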




[40/50] [abbrv] hadoop git commit: Merge trunk into HDDS-48

Posted by bh...@apache.org.
Merge trunk into HDDS-48


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c275a9a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c275a9a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c275a9a6

Branch: refs/heads/HDDS-48
Commit: c275a9a6a07b2bd889bdba4d05b420027f430b34
Parents: 44e19fc 83cd84b
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Mon Jul 9 12:13:03 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Mon Jul 9 12:13:03 2018 -0700

----------------------------------------------------------------------
 .gitignore                                      |    4 +
 dev-support/bin/ozone-dist-layout-stitching     |    2 +-
 ...ExcludePrivateAnnotationsStandardDoclet.java |    6 +-
 .../hadoop-common/src/main/conf/hadoop-env.sh   |    6 +-
 .../org/apache/hadoop/conf/Configuration.java   |  458 +++---
 .../java/org/apache/hadoop/fs/FileContext.java  |    9 +-
 .../org/apache/hadoop/fs/LocalDirAllocator.java |    7 +-
 .../hadoop-common/src/site/markdown/Metrics.md  |   39 +-
 .../org/apache/hadoop/fs/TestFileContext.java   |   44 +-
 .../apache/hadoop/fs/TestLocalDirAllocator.java |   59 +
 .../src/main/compose/ozone/docker-compose.yaml  |    6 +-
 .../src/main/compose/ozone/docker-config        |    2 +-
 .../src/main/compose/ozoneperf/README.md        |    4 +-
 .../main/compose/ozoneperf/docker-compose.yaml  |    6 +-
 .../src/main/compose/ozoneperf/docker-config    |    2 +-
 .../scm/client/ContainerOperationClient.java    |  117 +-
 hadoop-hdds/common/pom.xml                      |   18 +
 .../hadoop/hdds/protocol/DatanodeDetails.java   |   13 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |    6 +-
 .../hadoop/hdds/scm/client/ScmClient.java       |   43 +-
 .../container/common/helpers/ContainerInfo.java |  167 ++-
 .../common/helpers/ContainerWithPipeline.java   |  131 ++
 .../StorageContainerLocationProtocol.java       |   18 +-
 ...rLocationProtocolClientSideTranslatorPB.java |   34 +-
 .../org/apache/hadoop/ozone/OzoneConsts.java    |   22 +-
 .../apache/hadoop/ozone/audit/AuditAction.java  |   30 +
 .../hadoop/ozone/audit/AuditEventStatus.java    |   36 +
 .../apache/hadoop/ozone/audit/AuditLogger.java  |  128 ++
 .../hadoop/ozone/audit/AuditLoggerType.java     |   37 +
 .../apache/hadoop/ozone/audit/AuditMarker.java  |   38 +
 .../apache/hadoop/ozone/audit/Auditable.java    |   32 +
 .../apache/hadoop/ozone/audit/package-info.java |  123 ++
 .../org/apache/hadoop/ozone/common/Storage.java |    6 +-
 ...rLocationProtocolServerSideTranslatorPB.java |   33 +-
 .../main/proto/ScmBlockLocationProtocol.proto   |   10 +-
 .../StorageContainerLocationProtocol.proto      |   34 +-
 hadoop-hdds/common/src/main/proto/hdds.proto    |   28 +-
 .../common/src/main/resources/ozone-default.xml |  131 +-
 .../apache/hadoop/ozone/audit/DummyAction.java  |   51 +
 .../apache/hadoop/ozone/audit/DummyEntity.java  |   57 +
 .../ozone/audit/TestOzoneAuditLogger.java       |  147 ++
 .../apache/hadoop/ozone/audit/package-info.java |   23 +
 .../common/src/test/resources/log4j2.properties |   76 +
 .../apache/hadoop/hdds/scm/HddsServerUtil.java  |   11 -
 .../DeleteBlocksCommandHandler.java             |   30 +-
 .../protocol/StorageContainerNodeProtocol.java  |    4 +-
 .../src/main/resources/webapps/static/ozone.js  |    4 +-
 .../webapps/static/templates/config.html        |    4 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |   80 +-
 .../block/DatanodeDeletedBlockTransactions.java |   11 +-
 .../hadoop/hdds/scm/block/DeletedBlockLog.java  |    2 +-
 .../container/CloseContainerEventHandler.java   |   35 +-
 .../hdds/scm/container/ContainerMapping.java    |  128 +-
 .../scm/container/ContainerStateManager.java    |   30 +-
 .../hadoop/hdds/scm/container/Mapping.java      |   26 +-
 .../scm/container/closer/ContainerCloser.java   |   15 +-
 .../scm/container/states/ContainerStateMap.java |   13 +-
 .../hadoop/hdds/scm/events/SCMEvents.java       |   80 ++
 .../hadoop/hdds/scm/events/package-info.java    |   23 +
 .../hadoop/hdds/scm/node/CommandQueue.java      |    2 +-
 .../hadoop/hdds/scm/node/DatanodeInfo.java      |  109 ++
 .../hdds/scm/node/HeartbeatQueueItem.java       |   98 --
 .../hadoop/hdds/scm/node/NodeManager.java       |   16 +-
 .../hadoop/hdds/scm/node/NodeStateManager.java  |  575 ++++++++
 .../hadoop/hdds/scm/node/SCMNodeManager.java    |  511 +------
 .../node/states/NodeAlreadyExistsException.java |   45 +
 .../hdds/scm/node/states/NodeException.java     |   44 +
 .../scm/node/states/NodeNotFoundException.java  |   49 +
 .../hdds/scm/node/states/NodeStateMap.java      |  281 ++++
 .../hdds/scm/pipelines/PipelineManager.java     |   27 +-
 .../hdds/scm/pipelines/PipelineSelector.java    |   16 +
 .../scm/pipelines/ratis/RatisManagerImpl.java   |    1 +
 .../standalone/StandaloneManagerImpl.java       |    1 +
 .../hdds/scm/server/SCMBlockProtocolServer.java |    2 +-
 .../scm/server/SCMClientProtocolServer.java     |   74 +-
 .../server/SCMDatanodeHeartbeatDispatcher.java  |   13 +-
 .../scm/server/SCMDatanodeProtocolServer.java   |    2 +-
 .../scm/server/StorageContainerManager.java     |    7 +-
 .../hdds/scm/block/TestDeletedBlockLog.java     |   15 +-
 .../hdds/scm/container/MockNodeManager.java     |   58 +-
 .../TestCloseContainerEventHandler.java         |   54 +-
 .../scm/container/TestContainerMapping.java     |   27 +-
 .../container/closer/TestContainerCloser.java   |   18 +-
 .../hdds/scm/node/TestContainerPlacement.java   |   16 +-
 .../hadoop/hdds/scm/node/TestNodeManager.java   |  186 +--
 .../TestSCMDatanodeHeartbeatDispatcher.java     |   20 +-
 .../testutils/ReplicationNodeManagerMock.java   |   37 +-
 .../hadoop/hdds/scm/cli/OzoneBaseCLI.java       |    2 +-
 .../cli/container/CloseContainerHandler.java    |   10 +-
 .../cli/container/DeleteContainerHandler.java   |    9 +-
 .../scm/cli/container/InfoContainerHandler.java |   11 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   19 -
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |   46 +-
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |   15 +-
 .../hdfs/client/HdfsClientConfigKeys.java       |    3 +
 .../hdfs/client/impl/BlockReaderFactory.java    |   21 +-
 .../hdfs/client/impl/BlockReaderLocal.java      |   93 +-
 .../client/impl/BlockReaderLocalLegacy.java     |   44 +-
 .../hdfs/client/impl/BlockReaderRemote.java     |   33 +-
 .../datanode/ReplicaNotFoundException.java      |    2 +-
 .../ha/ConfiguredFailoverProxyProvider.java     |    9 +-
 .../InMemoryAliasMapFailoverProxyProvider.java  |   38 +
 .../hdfs/server/federation/router/Quota.java    |   10 +-
 .../router/RouterQuotaUpdateService.java        |   43 +-
 .../federation/router/RouterRpcServer.java      |    1 -
 .../router/TestDisableRouterQuota.java          |   94 ++
 .../federation/router/TestRouterQuota.java      |  212 ++-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |    5 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java    |   37 +-
 .../org/apache/hadoop/hdfs/NameNodeProxies.java |   15 +-
 ...yAliasMapProtocolClientSideTranslatorPB.java |   95 +-
 .../aliasmap/InMemoryAliasMapProtocol.java      |    5 +
 .../aliasmap/InMemoryLevelDBAliasMapServer.java |   19 +-
 .../impl/InMemoryLevelDBAliasMapClient.java     |   80 +-
 .../impl/TextFileRegionAliasMap.java            |    5 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   21 +-
 .../hdfs/server/datanode/DiskBalancer.java      |   29 +-
 .../erasurecode/StripedBlockReader.java         |    2 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |    8 +
 .../hdfs/server/namenode/NamenodeFsck.java      |    1 -
 .../src/main/resources/hdfs-default.xml         |   35 +-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   13 +-
 .../apache/hadoop/hdfs/MiniDFSNNTopology.java   |    2 +-
 .../hdfs/client/impl/BlockReaderTestUtil.java   |    2 -
 .../hdfs/client/impl/TestBlockReaderLocal.java  |    2 -
 .../blockmanagement/TestBlockTokenWithDFS.java  |    2 -
 .../TestNameNodePrunesMissingStorages.java      |    5 +-
 .../impl/TestInMemoryLevelDBAliasMapClient.java |    7 +
 .../datanode/TestDataNodeVolumeFailure.java     |    2 -
 .../server/diskbalancer/TestDiskBalancer.java   |   80 +-
 .../shortcircuit/TestShortCircuitCache.java     |   89 ++
 .../src/test/acceptance/basic/basic.robot       |    6 +-
 .../test/acceptance/basic/docker-compose.yaml   |    8 +-
 .../src/test/acceptance/basic/docker-config     |    4 +-
 .../src/test/acceptance/basic/ozone-shell.robot |   18 +-
 .../src/test/acceptance/commonlib.robot         |    4 +-
 .../test/acceptance/ozonefs/docker-compose.yaml |    8 +-
 .../src/test/acceptance/ozonefs/docker-config   |    4 +-
 .../src/test/acceptance/ozonefs/ozonefs.robot   |    6 +-
 .../apache/hadoop/ozone/client/BucketArgs.java  |    4 +-
 .../hadoop/ozone/client/OzoneClientFactory.java |   89 +-
 .../apache/hadoop/ozone/client/OzoneKey.java    |    2 +-
 .../apache/hadoop/ozone/client/VolumeArgs.java  |    4 +-
 .../ozone/client/io/ChunkGroupInputStream.java  |   33 +-
 .../ozone/client/io/ChunkGroupOutputStream.java |   63 +-
 .../client/rest/DefaultRestServerSelector.java  |    2 +-
 .../hadoop/ozone/client/rest/RestClient.java    |   15 +-
 .../ozone/client/rest/RestServerSelector.java   |    2 +-
 .../hadoop/ozone/client/rpc/RpcClient.java      |  142 +-
 .../ozone/client/TestHddsClientUtils.java       |   24 +-
 hadoop-ozone/common/pom.xml                     |    2 +-
 hadoop-ozone/common/src/main/bin/ozone          |    9 +-
 hadoop-ozone/common/src/main/bin/start-ozone.sh |   16 +-
 hadoop-ozone/common/src/main/bin/stop-ozone.sh  |   16 +-
 .../java/org/apache/hadoop/ozone/KsmUtils.java  |   87 --
 .../java/org/apache/hadoop/ozone/OmUtils.java   |   94 ++
 .../org/apache/hadoop/ozone/audit/OMAction.java |   51 +
 .../apache/hadoop/ozone/audit/package-info.java |   22 +
 .../apache/hadoop/ozone/freon/OzoneGetConf.java |   16 +-
 .../apache/hadoop/ozone/ksm/KSMConfigKeys.java  |   81 --
 .../hadoop/ozone/ksm/helpers/KsmBucketArgs.java |  233 ---
 .../hadoop/ozone/ksm/helpers/KsmBucketInfo.java |  235 ---
 .../hadoop/ozone/ksm/helpers/KsmKeyArgs.java    |  119 --
 .../hadoop/ozone/ksm/helpers/KsmKeyInfo.java    |  277 ----
 .../ozone/ksm/helpers/KsmKeyLocationInfo.java   |  129 --
 .../ksm/helpers/KsmKeyLocationInfoGroup.java    |  118 --
 .../ozone/ksm/helpers/KsmOzoneAclMap.java       |  110 --
 .../hadoop/ozone/ksm/helpers/KsmVolumeArgs.java |  223 ---
 .../ozone/ksm/helpers/OpenKeySession.java       |   50 -
 .../hadoop/ozone/ksm/helpers/ServiceInfo.java   |  237 ---
 .../hadoop/ozone/ksm/helpers/VolumeArgs.java    |  140 --
 .../hadoop/ozone/ksm/helpers/package-info.java  |   18 -
 .../apache/hadoop/ozone/ksm/package-info.java   |   21 -
 .../ksm/protocol/KeySpaceManagerProtocol.java   |  252 ----
 .../hadoop/ozone/ksm/protocol/package-info.java |   19 -
 ...ceManagerProtocolClientSideTranslatorPB.java |  769 ----------
 .../protocolPB/KeySpaceManagerProtocolPB.java   |   34 -
 .../ozone/ksm/protocolPB/package-info.java      |   19 -
 .../apache/hadoop/ozone/om/OMConfigKeys.java    |   81 ++
 .../hadoop/ozone/om/helpers/OmBucketArgs.java   |  233 +++
 .../hadoop/ozone/om/helpers/OmBucketInfo.java   |  235 +++
 .../hadoop/ozone/om/helpers/OmKeyArgs.java      |  119 ++
 .../hadoop/ozone/om/helpers/OmKeyInfo.java      |  277 ++++
 .../ozone/om/helpers/OmKeyLocationInfo.java     |  129 ++
 .../om/helpers/OmKeyLocationInfoGroup.java      |  118 ++
 .../hadoop/ozone/om/helpers/OmOzoneAclMap.java  |  110 ++
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java   |  223 +++
 .../hadoop/ozone/om/helpers/OpenKeySession.java |   50 +
 .../hadoop/ozone/om/helpers/ServiceInfo.java    |  237 +++
 .../hadoop/ozone/om/helpers/VolumeArgs.java     |  140 ++
 .../hadoop/ozone/om/helpers/package-info.java   |   18 +
 .../apache/hadoop/ozone/om/package-info.java    |   21 +
 .../ozone/om/protocol/OzoneManagerProtocol.java |  252 ++++
 .../hadoop/ozone/om/protocol/package-info.java  |   19 +
 ...neManagerProtocolClientSideTranslatorPB.java |  769 ++++++++++
 .../om/protocolPB/OzoneManagerProtocolPB.java   |   34 +
 .../ozone/om/protocolPB/package-info.java       |   19 +
 .../hadoop/ozone/protocolPB/KSMPBHelper.java    |  113 --
 .../hadoop/ozone/protocolPB/OMPBHelper.java     |  113 ++
 .../hadoop/ozone/protocolPB/OzonePBHelper.java  |   30 +
 .../main/proto/KeySpaceManagerProtocol.proto    |  474 ------
 .../src/main/proto/OzoneManagerProtocol.proto   |  480 +++++++
 hadoop-ozone/docs/content/GettingStarted.md     |   18 +-
 hadoop-ozone/docs/content/Metrics.md            |   10 +-
 hadoop-ozone/docs/content/_index.md             |   12 +-
 hadoop-ozone/docs/static/OzoneOverview.svg      |    2 +-
 .../container/TestContainerStateManager.java    |  161 ++-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |   24 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |   66 +-
 .../hadoop/ozone/TestContainerOperations.java   |   11 +-
 .../ozone/TestOzoneConfigurationFields.java     |    4 +-
 .../ozone/TestStorageContainerManager.java      |   28 +-
 .../TestStorageContainerManagerHelper.java      |   22 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |    6 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java    |   22 +-
 .../commandhandler/TestBlockDeletion.java       |  212 +++
 .../TestCloseContainerByPipeline.java           |   97 +-
 .../TestCloseContainerHandler.java              |   14 +-
 .../ozone/ksm/TestContainerReportWithKeys.java  |  143 --
 .../apache/hadoop/ozone/ksm/TestKSMMetrcis.java |  306 ----
 .../apache/hadoop/ozone/ksm/TestKSMSQLCli.java  |  284 ----
 .../hadoop/ozone/ksm/TestKeySpaceManager.java   | 1350 ------------------
 .../ksm/TestKeySpaceManagerRestInterface.java   |  135 --
 .../ozone/ksm/TestKsmBlockVersioning.java       |  253 ----
 .../ksm/TestMultipleContainerReadWrite.java     |  215 ---
 .../ozone/om/TestContainerReportWithKeys.java   |  143 ++
 .../om/TestMultipleContainerReadWrite.java      |  215 +++
 .../hadoop/ozone/om/TestOmBlockVersioning.java  |  253 ++++
 .../apache/hadoop/ozone/om/TestOmMetrics.java   |  313 ++++
 .../apache/hadoop/ozone/om/TestOmSQLCli.java    |  284 ++++
 .../hadoop/ozone/om/TestOzoneManager.java       | 1349 +++++++++++++++++
 .../ozone/om/TestOzoneManagerRestInterface.java |  135 ++
 .../hadoop/ozone/ozShell/TestOzoneShell.java    |   14 +-
 .../hadoop/ozone/scm/TestAllocateContainer.java |    6 +-
 .../hadoop/ozone/scm/TestContainerSQLCli.java   |    3 +-
 .../ozone/scm/TestContainerSmallFile.java       |   36 +-
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java |  127 +-
 .../ozone/scm/TestXceiverClientManager.java     |   62 +-
 .../ozone/scm/TestXceiverClientMetrics.java     |   14 +-
 .../hadoop/ozone/scm/node/TestQueryNode.java    |   19 +-
 .../ozone/web/TestDistributedOzoneVolumes.java  |   12 +-
 .../hadoop/ozone/web/client/TestKeys.java       |   58 +-
 .../src/test/resources/webapps/ksm/.gitkeep     |   15 -
 .../resources/webapps/ozoneManager/.gitkeep     |   15 +
 .../server/datanode/ObjectStoreHandler.java     |   33 +-
 .../ozone/web/handlers/KeyProcessTemplate.java  |    4 +-
 .../web/handlers/VolumeProcessTemplate.java     |    4 +-
 .../web/storage/DistributedStorageHandler.java  |  153 +-
 .../apache/hadoop/ozone/ksm/BucketManager.java  |   79 -
 .../hadoop/ozone/ksm/BucketManagerImpl.java     |  315 ----
 .../org/apache/hadoop/ozone/ksm/KSMMXBean.java  |   31 -
 .../hadoop/ozone/ksm/KSMMetadataManager.java    |  253 ----
 .../ozone/ksm/KSMMetadataManagerImpl.java       |  526 -------
 .../org/apache/hadoop/ozone/ksm/KSMMetrics.java |  459 ------
 .../org/apache/hadoop/ozone/ksm/KSMStorage.java |   90 --
 .../hadoop/ozone/ksm/KeyDeletingService.java    |  142 --
 .../org/apache/hadoop/ozone/ksm/KeyManager.java |  175 ---
 .../apache/hadoop/ozone/ksm/KeyManagerImpl.java |  566 --------
 .../hadoop/ozone/ksm/KeySpaceManager.java       |  914 ------------
 .../ozone/ksm/KeySpaceManagerHttpServer.java    |   78 -
 .../hadoop/ozone/ksm/OpenKeyCleanupService.java |  117 --
 .../ozone/ksm/ServiceListJSONServlet.java       |  103 --
 .../apache/hadoop/ozone/ksm/VolumeManager.java  |  100 --
 .../hadoop/ozone/ksm/VolumeManagerImpl.java     |  391 -----
 .../ozone/ksm/exceptions/KSMException.java      |  118 --
 .../ozone/ksm/exceptions/package-info.java      |   19 -
 .../apache/hadoop/ozone/ksm/package-info.java   |   21 -
 .../apache/hadoop/ozone/om/BucketManager.java   |   79 +
 .../hadoop/ozone/om/BucketManagerImpl.java      |  315 ++++
 .../hadoop/ozone/om/KeyDeletingService.java     |  142 ++
 .../org/apache/hadoop/ozone/om/KeyManager.java  |  175 +++
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |  566 ++++++++
 .../org/apache/hadoop/ozone/om/OMMXBean.java    |   31 +
 .../hadoop/ozone/om/OMMetadataManager.java      |  253 ++++
 .../org/apache/hadoop/ozone/om/OMMetrics.java   |  459 ++++++
 .../org/apache/hadoop/ozone/om/OMStorage.java   |   90 ++
 .../hadoop/ozone/om/OmMetadataManagerImpl.java  |  526 +++++++
 .../hadoop/ozone/om/OpenKeyCleanupService.java  |  117 ++
 .../apache/hadoop/ozone/om/OzoneManager.java    |  911 ++++++++++++
 .../hadoop/ozone/om/OzoneManagerHttpServer.java |   78 +
 .../hadoop/ozone/om/ServiceListJSONServlet.java |  103 ++
 .../apache/hadoop/ozone/om/VolumeManager.java   |  100 ++
 .../hadoop/ozone/om/VolumeManagerImpl.java      |  390 +++++
 .../hadoop/ozone/om/exceptions/OMException.java |  118 ++
 .../ozone/om/exceptions/package-info.java       |   19 +
 .../apache/hadoop/ozone/om/package-info.java    |   21 +
 ...ceManagerProtocolServerSideTranslatorPB.java |  559 --------
 ...neManagerProtocolServerSideTranslatorPB.java |  571 ++++++++
 .../hadoop/ozone/protocolPB/package-info.java   |    2 +-
 .../src/main/webapps/ksm/index.html             |   70 -
 .../src/main/webapps/ksm/ksm-metrics.html       |   44 -
 .../ozone-manager/src/main/webapps/ksm/ksm.js   |  110 --
 .../ozone-manager/src/main/webapps/ksm/main.css |   23 -
 .../src/main/webapps/ksm/main.html              |   18 -
 .../src/main/webapps/ozoneManager/index.html    |   70 +
 .../src/main/webapps/ozoneManager/main.css      |   23 +
 .../src/main/webapps/ozoneManager/main.html     |   18 +
 .../main/webapps/ozoneManager/om-metrics.html   |   44 +
 .../main/webapps/ozoneManager/ozoneManager.js   |  110 ++
 .../hadoop/ozone/ksm/TestBucketManagerImpl.java |  395 -----
 .../hadoop/ozone/ksm/TestChunkStreams.java      |  234 ---
 .../ksm/TestKeySpaceManagerHttpServer.java      |  141 --
 .../apache/hadoop/ozone/ksm/package-info.java   |   21 -
 .../hadoop/ozone/om/TestBucketManagerImpl.java  |  394 +++++
 .../hadoop/ozone/om/TestChunkStreams.java       |  234 +++
 .../ozone/om/TestOzoneManagerHttpServer.java    |  141 ++
 .../apache/hadoop/ozone/om/package-info.java    |   21 +
 .../hadoop/fs/ozone/contract/OzoneContract.java |    4 +-
 .../genesis/BenchMarkContainerStateMap.java     |   16 +-
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |  111 +-
 .../hadoop/fs/s3a/s3guard/S3GuardTool.java      |   10 +
 .../s3guard/AbstractS3GuardToolTestBase.java    |   18 +
 .../namenode/ITestProvidedImplementation.java   |  373 ++++-
 .../dev-support/findbugs-exclude.xml            |   17 +-
 .../hadoop/yarn/api/records/Resource.java       |   13 +
 .../api/records/impl/LightWeightResource.java   |   23 +-
 .../hadoop/yarn/conf/YarnConfiguration.java     |    7 +
 .../impl/pb/GetApplicationsRequestPBImpl.java   |   44 +-
 .../logaggregation/AggregatedLogFormat.java     |    6 +-
 .../timeline/RollingLevelDBTimelineStore.java   |    6 +
 .../server/timeline/TimelineDataManager.java    |    7 +-
 .../timeline/webapp/TimelineWebServices.java    |    4 +
 .../webapp/TestTimelineWebServices.java         |    2 +-
 .../amrmproxy/BroadcastAMRMProxyPolicy.java     |   11 -
 .../amrmproxy/RejectAMRMProxyPolicy.java        |    4 -
 .../TestBroadcastAMRMProxyFederationPolicy.java |   11 +-
 .../yarn/server/nodemanager/NodeManager.java    |   66 +-
 .../runtime/DockerLinuxContainerRuntime.java    |    4 +-
 .../runtime/ContainerExecutionException.java    |    6 +
 .../impl/container-executor.c                   |   30 +-
 .../container-executor/impl/utils/docker-util.c |    2 +-
 .../test/test-container-executor.c              |   20 +
 .../nodemanager/TestNodeManagerResync.java      |   56 +
 .../runtime/TestDockerContainerRuntime.java     |   10 +-
 .../conf/capacity-scheduler.xml                 |   10 +
 .../scheduler/capacity/CapacityScheduler.java   |   45 +-
 .../CapacitySchedulerConfiguration.java         |   10 +
 .../scheduler/capacity/ParentQueue.java         |   36 +-
 .../allocator/AbstractContainerAllocator.java   |   13 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |    5 +
 .../scheduler/fair/ConfigurableResource.java    |   69 +-
 .../fair/FairSchedulerConfiguration.java        |  174 ++-
 .../allocation/AllocationFileQueueParser.java   |    2 +-
 .../resourcemanager/webapp/dao/AppInfo.java     |    2 +-
 .../webapp/dao/SchedulerInfo.java               |    8 +-
 .../TestWorkPreservingRMRestart.java            |    2 +
 .../fair/TestFairSchedulerConfiguration.java    |  160 ++-
 .../webapp/TestRMWebServices.java               |   31 +-
 .../webapp/TestRMWebServicesApps.java           |   14 +-
 ...estRMWebServicesAppsCustomResourceTypes.java |  242 ++++
 .../webapp/TestRMWebServicesCapacitySched.java  |   30 +-
 .../TestRMWebServicesConfigurationMutation.java |    5 +
 .../webapp/TestRMWebServicesFairScheduler.java  |   95 +-
 .../TestRMWebServicesSchedulerActivities.java   |    2 +-
 ...ustomResourceTypesConfigurationProvider.java |  138 ++
 .../FairSchedulerJsonVerifications.java         |  139 ++
 .../FairSchedulerXmlVerifications.java          |  153 ++
 ...ervicesFairSchedulerCustomResourceTypes.java |  271 ++++
 .../webapp/helper/AppInfoJsonVerifications.java |  123 ++
 .../webapp/helper/AppInfoXmlVerifications.java  |  132 ++
 .../webapp/helper/BufferedClientResponse.java   |   57 +
 .../helper/JsonCustomResourceTypeTestcase.java  |   77 +
 .../ResourceRequestsJsonVerifications.java      |  252 ++++
 .../ResourceRequestsXmlVerifications.java       |  215 +++
 .../helper/XmlCustomResourceTypeTestCase.java   |  112 ++
 .../router/clientrm/RouterClientRMService.java  |   53 +-
 .../router/rmadmin/RouterRMAdminService.java    |   51 +-
 .../server/router/webapp/RouterWebServices.java |   48 +-
 .../clientrm/TestRouterClientRMService.java     |   60 +
 .../rmadmin/TestRouterRMAdminService.java       |   60 +
 .../router/webapp/TestRouterWebServices.java    |   65 +
 .../pom.xml                                     |   10 +
 .../storage/TestTimelineReaderHBaseDown.java    |  220 +++
 .../storage/HBaseTimelineReaderImpl.java        |   93 ++
 .../reader/TimelineFromIdConverter.java         |   93 ++
 .../reader/TimelineReaderWebServices.java       |  198 ++-
 .../TestTimelineReaderWebServicesBasicAcl.java  |  154 ++
 .../src/site/markdown/FairScheduler.md          |    6 +-
 .../src/main/webapp/app/initializers/loader.js  |   10 +-
 379 files changed, 22363 insertions(+), 15606 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c275a9a6/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c275a9a6/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
----------------------------------------------------------------------
diff --cc hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 82d67b7,4fad5d8..0db5993
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@@ -98,11 -93,8 +98,11 @@@ public final class OzoneConsts 
    public static final String BLOCK_DB = "block.db";
    public static final String OPEN_CONTAINERS_DB = "openContainers.db";
    public static final String DELETED_BLOCK_DB = "deletedBlock.db";
-   public static final String KSM_DB_NAME = "ksm.db";
+   public static final String OM_DB_NAME = "om.db";
  
 +  public static final String STORAGE_DIR_CHUNKS = "chunks";
 +  public static final String CONTAINER_FILE_CHECKSUM_EXTENSION = ".chksm";
 +
    /**
     * Supports Bucket Versioning.
     */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c275a9a6/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c275a9a6/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c275a9a6/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
----------------------------------------------------------------------
diff --cc hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index 4fc1cd9,d215da9..c3d1596
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@@ -31,13 -29,11 +31,12 @@@ import org.apache.hadoop.hdds.protocol.
  import org.apache.hadoop.hdds.protocol.proto
      .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
  import org.apache.hadoop.ozone.OzoneConsts;
 -import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
  import org.apache.hadoop.ozone.container.common.helpers
      .DeletedContainerBlocksSummary;
 -import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
 -import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 +import org.apache.hadoop.ozone.container.common.interfaces.Container;
- import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 +import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
 +import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
  import org.apache.hadoop.ozone.container.common.statemachine
      .EndpointStateMachine;
  import org.apache.hadoop.ozone.container.common.statemachine
@@@ -167,21 -145,28 +166,28 @@@ public class DeleteBlocksCommandHandle
     * Move a bunch of blocks from a container to deleting state.
     * This is a meta update, the actual deletes happen in async mode.
     *
 +   * @param containerData - KeyValueContainerData
     * @param delTX a block deletion transaction.
 -   * @param config configuration.
     * @throws IOException if I/O error occurs.
     */
 -  private void deleteContainerBlocks(DeletedBlocksTransaction delTX,
 -      Configuration config) throws IOException {
 +  private void deleteKeyValueContainerBlocks(
 +      KeyValueContainerData containerData, DeletedBlocksTransaction delTX)
 +      throws IOException {
      long containerId = delTX.getContainerID();
 -    ContainerData containerInfo = containerManager.readContainer(containerId);
      if (LOG.isDebugEnabled()) {
        LOG.debug("Processing Container : {}, DB path : {}", containerId,
 -          containerInfo.getDBPath());
 +          containerData.getMetadataPath());
      }
  
 -    if (delTX.getTxID() < containerInfo.getDeleteTransactionId()) {
++    if (delTX.getTxID() < containerData.getDeleteTransactionId()) {
+       LOG.debug(String.format("Ignoring delete blocks for containerId: %d."
+               + " Outdated delete transactionId %d < %d", containerId,
 -          delTX.getTxID(), containerInfo.getDeleteTransactionId()));
++          delTX.getTxID(), containerData.getDeleteTransactionId()));
+       return;
+     }
+ 
      int newDeletionBlocks = 0;
 -    MetadataStore containerDB = KeyUtils.getDB(containerInfo, config);
 +    MetadataStore containerDB = KeyUtils.getDB(containerData, conf);
      for (Long blk : delTX.getLocalIDList()) {
        BatchOperation batch = new BatchOperation();
        byte[] blkBytes = Longs.toByteArray(blk);
@@@ -208,13 -203,15 +224,15 @@@
          LOG.debug("Block {} not found or already under deletion in"
                  + " container {}, skip deleting it.", blk, containerId);
        }
-       containerDB.put(DFSUtil.string2Bytes(
-           OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX + containerId),
-           Longs.toByteArray(delTX.getTxID()));
      }
  
+     containerDB.put(DFSUtil.string2Bytes(
+         OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX + delTX.getContainerID()),
+         Longs.toByteArray(delTX.getTxID()));
 -    containerManager
 -        .updateDeleteTransactionId(delTX.getContainerID(), delTX.getTxID());
++    containerData
++        .updateDeleteTransactionId(delTX.getTxID());
      // update pending deletion blocks count in in-memory container status
 -    containerManager.incrPendingDeletionBlocks(newDeletionBlocks, containerId);
 +    containerData.incrPendingDeletionBlocks(newDeletionBlocks);
    }
  
    @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c275a9a6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c275a9a6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c275a9a6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
----------------------------------------------------------------------
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index ad1e706,a30c6f4..fff8611
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@@ -27,13 -27,11 +27,13 @@@ import org.apache.hadoop.hdds.scm.conta
  import org.apache.hadoop.hdfs.DFSUtil;
  import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
  import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 -import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 -import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
 +import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 +import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
 +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
  import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
- import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
- import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
+ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
  import org.apache.hadoop.ozone.web.handlers.BucketArgs;
  import org.apache.hadoop.ozone.web.handlers.KeyArgs;
  import org.apache.hadoop.ozone.web.handlers.UserArgs;
@@@ -160,14 -158,15 +160,16 @@@ public class TestStorageContainerManage
  
    private MetadataStore getContainerMetadata(Long containerID)
        throws IOException {
-     ContainerInfo container = cluster.getStorageContainerManager()
-         .getClientProtocolServer().getContainer(containerID);
-     DatanodeDetails leadDN = container.getPipeline().getLeader();
+     ContainerWithPipeline containerWithPipeline = cluster
+         .getStorageContainerManager().getClientProtocolServer()
+         .getContainerWithPipeline(containerID);
+ 
+     DatanodeDetails leadDN = containerWithPipeline.getPipeline().getLeader();
      OzoneContainer containerServer =
          getContainerServerByDatanodeUuid(leadDN.getUuidString());
 -    ContainerData containerData = containerServer.getContainerManager()
 -        .readContainer(containerID);
 +    KeyValueContainerData containerData = (KeyValueContainerData) containerServer
 +        .getContainerSet()
 +        .getContainer(containerID).getContainerData();
      return KeyUtils.getDB(containerData, conf);
    }
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c275a9a6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
----------------------------------------------------------------------
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index b832dd2,58b831b..30b18c2
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@@ -32,10 -32,10 +32,10 @@@ import org.apache.hadoop.ozone.client.O
  import org.apache.hadoop.ozone.client.OzoneClient;
  import org.apache.hadoop.ozone.client.OzoneClientFactory;
  import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 -import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 +import org.apache.hadoop.ozone.container.common.impl.ContainerData;
  import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
- import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
- import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
+ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
  import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
  import org.apache.hadoop.test.GenericTestUtils;
  import org.junit.AfterClass;
@@@ -204,14 -257,8 +257,8 @@@ public class TestCloseContainerByPipeli
          if (datanode.equals(datanodeService.getDatanodeDetails())) {
            containerData =
                datanodeService.getDatanodeStateMachine().getContainer()
 -                  .getContainerManager().readContainer(containerID);
 +                  .getContainerSet().getContainer(containerID).getContainerData();
-           if (!containerData.isOpen()) {
-             // make sure the closeContainerHandler on the Datanode is invoked
-             Assert.assertTrue(
-                 datanodeService.getDatanodeStateMachine().getCommandDispatcher()
-                     .getCloseContainerHandler().getInvocationCount() > 0);
-             return true;
-           }
+           return !containerData.isOpen();
          }
      } catch (StorageContainerException e) {
        throw new AssertionError(e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c275a9a6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
----------------------------------------------------------------------
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index 114bd04,58a5154..682bd63
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@@ -27,9 -27,9 +27,9 @@@ import org.apache.hadoop.hdds.client.Re
  import org.apache.hadoop.hdds.client.ReplicationType;
  import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
  import org.apache.hadoop.ozone.client.rest.OzoneException;
 -import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 +import org.apache.hadoop.ozone.container.common.impl.ContainerData;
- import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
- import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
+ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
  import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
  import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
  import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c275a9a6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
----------------------------------------------------------------------
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
index 0000000,5481506..c25b00e
mode 000000,100644..100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
@@@ -1,0 -1,143 +1,143 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with this
+  * work for additional information regarding copyright ownership.  The ASF
+  * licenses this file to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  * <p>
+  * http://www.apache.org/licenses/LICENSE-2.0
+  * <p>
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  * License for the specific language governing permissions and limitations under
+  * the License.
+  */
+ package org.apache.hadoop.ozone.om;
+ 
+ import org.apache.commons.lang3.RandomStringUtils;
+ 
+ import org.apache.hadoop.hdds.client.ReplicationFactor;
+ import org.apache.hadoop.hdds.client.ReplicationType;
+ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+ import org.apache.hadoop.ozone.MiniOzoneCluster;
+ import org.apache.hadoop.ozone.OzoneConfigKeys;
+ import org.apache.hadoop.ozone.OzoneConsts;
+ import org.apache.hadoop.ozone.client.*;
+ import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 -import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 -import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
++import org.apache.hadoop.ozone.container.common.impl.ContainerData;
++import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+ import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+ import org.junit.AfterClass;
+ import org.junit.BeforeClass;
+ import org.junit.Rule;
+ import org.junit.Test;
+ import org.junit.rules.ExpectedException;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import java.io.IOException;
+ 
+ /**
+  * This class tests container report with DN container state info.
+  */
+ public class TestContainerReportWithKeys {
+   private static final Logger LOG = LoggerFactory.getLogger(
+       TestContainerReportWithKeys.class);
+   private static MiniOzoneCluster cluster = null;
+   private static OzoneConfiguration conf;
+   private static StorageContainerManager scm;
+ 
+   @Rule
+   public ExpectedException exception = ExpectedException.none();
+ 
+   /**
+    * Create a MiniDFSCluster for testing.
+    * <p>
+    * Ozone is made active by setting OZONE_ENABLED = true and
+    * OZONE_HANDLER_TYPE_KEY = "distributed"
+    *
+    * @throws IOException
+    */
+   @BeforeClass
+   public static void init() throws Exception {
+     conf = new OzoneConfiguration();
+     conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+         OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+     cluster = MiniOzoneCluster.newBuilder(conf).build();
+     cluster.waitForClusterToBeReady();
+     scm = cluster.getStorageContainerManager();
+   }
+ 
+   /**
+    * Shutdown MiniDFSCluster.
+    */
+   @AfterClass
+   public static void shutdown() {
+     if (cluster != null) {
+       cluster.shutdown();
+     }
+   }
+ 
+   @Test
+   public void testContainerReportKeyWrite() throws Exception {
+     final String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+     final String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+     final String keyName = "key" + RandomStringUtils.randomNumeric(5);
+     final int keySize = 100;
+ 
+     OzoneClient client = OzoneClientFactory.getClient(conf);
+     ObjectStore objectStore = client.getObjectStore();
+     objectStore.createVolume(volumeName);
+     objectStore.getVolume(volumeName).createBucket(bucketName);
+     OzoneOutputStream key =
+         objectStore.getVolume(volumeName).getBucket(bucketName)
+             .createKey(keyName, keySize, ReplicationType.STAND_ALONE,
+                 ReplicationFactor.ONE);
+     String dataString = RandomStringUtils.randomAlphabetic(keySize);
+     key.write(dataString.getBytes());
+     key.close();
+ 
+     OmKeyArgs keyArgs = new OmKeyArgs.Builder()
+         .setVolumeName(volumeName)
+         .setBucketName(bucketName)
+         .setKeyName(keyName)
+         .setType(HddsProtos.ReplicationType.STAND_ALONE)
+         .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(keySize)
+         .build();
+ 
+ 
+     OmKeyLocationInfo keyInfo =
+         cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
+             .get(0).getBlocksLatestVersionOnly().get(0);
+ 
+     ContainerData cd = getContainerData(keyInfo.getContainerID());
+ 
 -    LOG.info("DN Container Data:  keyCount: {} used: {} ",
 -        cd.getKeyCount(), cd.getBytesUsed());
++/*    LOG.info("DN Container Data:  keyCount: {} used: {} ",
++        cd.getKeyCount(), cd.getBytesUsed());*/
+ 
+     ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID());
+ 
+     LOG.info("SCM Container Info keyCount: {} usedBytes: {}",
+         cinfo.getNumberOfKeys(), cinfo.getUsedBytes());
+   }
+ 
+ 
+   private static ContainerData getContainerData(long containerID) {
+     ContainerData containerData;
+     try {
 -      ContainerManager containerManager = cluster.getHddsDatanodes().get(0)
 -          .getDatanodeStateMachine().getContainer().getContainerManager();
 -      containerData = containerManager.readContainer(containerID);
++      ContainerSet containerManager = cluster.getHddsDatanodes().get(0)
++          .getDatanodeStateMachine().getContainer().getContainerSet();
++      containerData = containerManager.getContainer(containerID).getContainerData();
+     } catch (StorageContainerException e) {
+       throw new AssertionError(e);
+     }
+     return containerData;
+   }
+ }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c275a9a6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
----------------------------------------------------------------------
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
index 5c62803,42bb936..a2d95e8
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
@@@ -141,9 -144,8 +144,8 @@@ public class TestContainerSmallFile 
      ContainerProtocolCalls.writeSmallFile(client, blockID,
          "data123".getBytes(), traceID);
  
- 
      thrown.expect(StorageContainerException.class);
 -    thrown.expectMessage("Unable to find the container");
 +    thrown.expectMessage("ContainerID 8888 does not exist");
  
      // Try to read a invalid key
      ContainerProtos.GetSmallFileResponseProto response =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c275a9a6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
----------------------------------------------------------------------
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
index 12d444a,a6bb586..cc11feb
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
@@@ -162,21 -158,22 +163,22 @@@ public class TestSCMCli 
      // 1. Test to delete a non-empty container.
      // ****************************************
      // Create an non-empty container
-     ContainerInfo container = containerOperationClient
+     ContainerWithPipeline container = containerOperationClient
          .createContainer(xceiverClientManager.getType(),
              HddsProtos.ReplicationFactor.ONE, containerOwner);
--
 -    ContainerData cdata = ContainerData
 -        .getFromProtBuf(containerOperationClient.readContainer(
 -            container.getContainerInfo().getContainerID()), conf);
 -    KeyUtils.getDB(cdata, conf)
 +    KeyValueContainerData kvData = KeyValueContainerData
 +        .getFromProtoBuf(containerOperationClient.readContainer(
-             container.getContainerID(), container.getPipeline()));
++            container.getContainerInfo().getContainerID(), container
++                .getPipeline()));
 +    KeyUtils.getDB(kvData, conf)
-         .put(Longs.toByteArray(container.getContainerID()),
+         .put(Longs.toByteArray(container.getContainerInfo().getContainerID()),
              "someKey".getBytes());
-     Assert.assertTrue(containerExist(container.getContainerID()));
 -    Assert.assertTrue(
 -        containerExist(container.getContainerInfo().getContainerID()));
++    Assert.assertTrue(containerExist(container.getContainerInfo()
++        .getContainerID()));
  
      // Gracefully delete a container should fail because it is open.
-     delCmd = new String[] {"-container", "-delete", "-c",
-         Long.toString(container.getContainerID())};
+     delCmd = new String[]{"-container", "-delete", "-c",
+         Long.toString(container.getContainerInfo().getContainerID())};
      testErr = new ByteArrayOutputStream();
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      exitCode = runCommandAndGetOutput(delCmd, out, testErr);
@@@ -275,26 -267,24 +272,27 @@@
          EXECUTION_ERROR, exitCode);
  
      // Create an empty container.
-     ContainerInfo container = containerOperationClient
+     ContainerWithPipeline container = containerOperationClient
          .createContainer(xceiverClientManager.getType(),
              HddsProtos.ReplicationFactor.ONE, containerOwner);
 -    ContainerData data = ContainerData.getFromProtBuf(containerOperationClient
 -        .readContainer(container.getContainerInfo().getContainerID()), conf);
 -
 +    KeyValueContainerData data = KeyValueContainerData
 +        .getFromProtoBuf(containerOperationClient.
-             readContainer(container.getContainerID(),
++            readContainer(container.getContainerInfo().getContainerID(),
 +                container.getPipeline()));
- 
-     info = new String[] { "-container", "-info", "-c",
-         Long.toString(container.getContainerID()) };
+     info = new String[]{"-container", "-info", "-c",
+         Long.toString(container.getContainerInfo().getContainerID())};
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      exitCode = runCommandAndGetOutput(info, out, null);
      assertEquals("Expected Success, did not find it.", ResultCode.SUCCESS,
-             exitCode);
+         exitCode);
  
      String openStatus = data.isOpen() ? "OPEN" : "CLOSED";
 -    String expected = String.format(formatStr, container.getContainerInfo()
 -            .getContainerID(), openStatus, data.getDBPath(),
 -        data.getContainerPath(), "", datanodeDetails.getHostName(),
 -        datanodeDetails.getHostName());
 +    String expected =
-         String.format(formatStr, container.getContainerID(), openStatus,
-         data.getDbFile().getPath(), data.getContainerPath(), "",
-         datanodeDetails.getHostName(), datanodeDetails.getHostName());
++        String.format(formatStr, container.getContainerInfo().getContainerID
++                (), openStatus, data.getDbFile().getPath(), data
++                .getContainerPath(), "", datanodeDetails.getHostName(),
++            datanodeDetails.getHostName());
++
      assertEquals(expected, out.toString());
  
      out.reset();
@@@ -303,9 -293,9 +301,10 @@@
      container = containerOperationClient
          .createContainer(xceiverClientManager.getType(),
              HddsProtos.ReplicationFactor.ONE, containerOwner);
 -    data = ContainerData
 -        .getFromProtBuf(containerOperationClient.readContainer(
 -            container.getContainerInfo().getContainerID()), conf);
 +    data = KeyValueContainerData
 +        .getFromProtoBuf(containerOperationClient.readContainer(
-             container.getContainerID(), container.getPipeline()));
++            container.getContainerInfo().getContainerID(), container
++                .getPipeline()));
      KeyUtils.getDB(data, conf)
          .put(containerID.getBytes(), "someKey".getBytes());
  
@@@ -315,25 -305,24 +314,27 @@@
      assertEquals(ResultCode.SUCCESS, exitCode);
  
      openStatus = data.isOpen() ? "OPEN" : "CLOSED";
-     expected = String.format(formatStr, container.getContainerID(), openStatus,
-         data.getDbFile().getPath(), data.getContainerPath(), "",
-         datanodeDetails.getHostName(), datanodeDetails.getHostName());
 -    expected = String.format(formatStr, container.getContainerInfo().
 -            getContainerID(), openStatus, data.getDBPath(),
 -        data.getContainerPath(), "", datanodeDetails.getHostName(),
++
++    expected = String.format(formatStr, container.getContainerInfo()
++            .getContainerID(), openStatus, data.getDbFile().getPath(), data
++            .getContainerPath(), "", datanodeDetails.getHostName(),
+         datanodeDetails.getHostName());
      assertEquals(expected, out.toString());
  
      out.reset();
  
- 
      // Close last container and test info again.
-     containerOperationClient.closeContainer(
-         container.getContainerID(), container.getPipeline());
+     containerOperationClient
+         .closeContainer(container.getContainerInfo().getContainerID());
  
-     info = new String[] { "-container", "-info", "-c",
-         Long.toString(container.getContainerID()) };
+     info = new String[]{"-container", "-info", "-c",
+         Long.toString(container.getContainerInfo().getContainerID())};
      exitCode = runCommandAndGetOutput(info, out, null);
      assertEquals(ResultCode.SUCCESS, exitCode);
 -    data = ContainerData.getFromProtBuf(containerOperationClient
 -        .readContainer(container.getContainerInfo().getContainerID()), conf);
 +    data = KeyValueContainerData
 +        .getFromProtoBuf(containerOperationClient.readContainer(
-             container.getContainerID(), container.getPipeline()));
++            container.getContainerInfo().getContainerID(), container
++                .getPipeline()));
  
      openStatus = data.isOpen() ? "OPEN" : "CLOSED";
      expected = String

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c275a9a6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index 2f592c1,a95bd0e..c144db2
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@@ -44,18 -44,17 +44,18 @@@ import org.apache.hadoop.ozone.client.i
  import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
  import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
  import org.apache.hadoop.ozone.client.rpc.RpcClient;
 -import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
  import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 +import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
  import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
- import org.apache.hadoop.ozone.ksm.KeySpaceManager;
- import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
- import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
- import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
- import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
- import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
- import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos
+ import org.apache.hadoop.ozone.om.OzoneManager;
+ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
      .Status;
  import org.apache.hadoop.ozone.client.rest.OzoneException;
  import org.apache.hadoop.ozone.web.utils.OzoneUtils;
@@@ -663,12 -661,11 +663,12 @@@ public class TestKeys 
    }
  
    @Test
 +  @Ignore("Needs to be fixed for new SCM and Storage design")
    public void testDeleteKey() throws Exception {
-     KeySpaceManager ksm = ozoneCluster.getKeySpaceManager();
+     OzoneManager ozoneManager = ozoneCluster.getOzoneManager();
      // To avoid interference from other test cases,
      // we collect number of existing keys at the beginning
-     int numOfExistedKeys = countKsmKeys(ksm);
+     int numOfExistedKeys = countOmKeys(ozoneManager);
  
      // Keep tracking bucket keys info while creating them
      PutHelper helper = new PutHelper(client, path);
@@@ -697,20 -694,17 +697,20 @@@
  
        // Memorize chunks that has been created,
        // so we can verify actual deletions at DN side later.
-       for (KsmKeyInfo keyInfo : createdKeys) {
-         List<KsmKeyLocationInfo> locations =
+       for (OmKeyInfo keyInfo : createdKeys) {
+         List<OmKeyLocationInfo> locations =
              keyInfo.getLatestVersionLocations().getLocationList();
-         for (KsmKeyLocationInfo location : locations) {
+         for (OmKeyLocationInfo location : locations) {
 -          KeyData keyData = new KeyData(location.getBlockID());
 -          KeyData blockInfo = cm.getContainerManager()
 -              .getKeyManager().getKey(keyData);
 -          ContainerData containerData = cm.getContainerManager()
 -              .readContainer(keyData.getContainerID());
 -          File dataDir = ContainerUtils
 -              .getDataDirectory(containerData).toFile();
 +          KeyValueHandler  keyValueHandler = (KeyValueHandler) cm
 +              .getDispatcher().getHandler(ContainerProtos.ContainerType
 +                  .KeyValueContainer);
 +          KeyValueContainer container = (KeyValueContainer) cm.getContainerSet()
 +              .getContainer(location.getBlockID().getContainerID());
 +          KeyData blockInfo = keyValueHandler
 +              .getKeyManager().getKey(container, location.getBlockID());
 +          KeyValueContainerData containerData = (KeyValueContainerData) container
 +              .getContainerData();
 +          File dataDir = new File(containerData.getChunksPath());
            for (ContainerProtos.ChunkInfo chunkInfo : blockInfo.getChunks()) {
              File chunkFile = dataDir.toPath()
                  .resolve(chunkInfo.getChunkName()).toFile();




[04/50] [abbrv] hadoop git commit: YARN-8435. Fix NPE when the same client simultaneously contacts the Yarn Router for the first time. Contributed by Rang Jiaheng.

Posted by bh...@apache.org.
YARN-8435. Fix NPE when the same client simultaneously contacts the Yarn Router for the first time. Contributed by Rang Jiaheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d9804dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d9804dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d9804dc

Branch: refs/heads/HDDS-48
Commit: 0d9804dcef2eab5ebf84667d9ca49bb035d9a731
Parents: 71df8c2
Author: Giovanni Matteo Fumarola <gi...@apache.com>
Authored: Thu Jul 5 10:54:31 2018 -0700
Committer: Giovanni Matteo Fumarola <gi...@apache.com>
Committed: Thu Jul 5 10:54:31 2018 -0700

----------------------------------------------------------------------
 .../router/clientrm/RouterClientRMService.java  | 53 ++++++++--------
 .../router/rmadmin/RouterRMAdminService.java    | 51 ++++++++-------
 .../server/router/webapp/RouterWebServices.java | 48 +++++++--------
 .../clientrm/TestRouterClientRMService.java     | 60 ++++++++++++++++++
 .../rmadmin/TestRouterRMAdminService.java       | 60 ++++++++++++++++++
 .../router/webapp/TestRouterWebServices.java    | 65 ++++++++++++++++++++
 6 files changed, 259 insertions(+), 78 deletions(-)
----------------------------------------------------------------------
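
For readers skimming the diffs below: the NPE window in the old code came from
registering an empty RequestInterceptorChainWrapper in the per-user map and only
initializing its interceptor chain afterwards (see the removed comment in each
hunk), so a second request for the same user could pick up a wrapper whose root
interceptor was still null. The patch builds the chain inside the synchronized
block and publishes it only once initialization has succeeded. A minimal
stand-alone sketch of that pattern, with illustrative names (Chain and
PipelineCacheSketch are not the actual Router classes, and the ConcurrentHashMap
is an assumption, not the map type used by the Router services):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class PipelineCacheSketch {

      // Stand-in for RequestInterceptorChainWrapper; init() is the expensive part.
      static class Chain {
        void init(String user) { /* build the interceptor chain for this user */ }
      }

      // A concurrent map is assumed so the unlocked fast-path read below is safe.
      private final Map<String, Chain> userPipelineMap = new ConcurrentHashMap<>();

      Chain getInterceptorChain(String user) {
        Chain chain = userPipelineMap.get(user);     // fast path, no lock
        if (chain != null) {
          return chain;
        }
        return initializePipeline(user);
      }

      private Chain initializePipeline(String user) {
        synchronized (userPipelineMap) {
          Chain existing = userPipelineMap.get(user);
          if (existing != null) {
            return existing;                         // another thread won the race
          }
          Chain chain = new Chain();
          chain.init(user);                          // initialize fully under the lock
          userPipelineMap.put(user, chain);          // publish only after init succeeds
          return chain;
        }
      }
    }

With this shape the map never holds a half-built wrapper, at the cost of holding
the lock while a user's chain initializes; the new tests below exercise exactly
this case with two concurrent requests for the same user and assert that both
receive the same chain instance.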


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9804dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java
index 73cc185..bbb8047 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java
@@ -430,13 +430,15 @@ public class RouterClientRMService extends AbstractService
     return pipeline.getRootInterceptor().getResourceTypeInfo(request);
   }
 
-  private RequestInterceptorChainWrapper getInterceptorChain()
+  @VisibleForTesting
+  protected RequestInterceptorChainWrapper getInterceptorChain()
       throws IOException {
     String user = UserGroupInformation.getCurrentUser().getUserName();
-    if (!userPipelineMap.containsKey(user)) {
-      initializePipeline(user);
+    RequestInterceptorChainWrapper chain = userPipelineMap.get(user);
+    if (chain != null && chain.getRootInterceptor() != null) {
+      return chain;
     }
-    return userPipelineMap.get(user);
+    return initializePipeline(user);
   }
 
   /**
@@ -503,36 +505,33 @@ public class RouterClientRMService extends AbstractService
    *
    * @param user
    */
-  private void initializePipeline(String user) {
-    RequestInterceptorChainWrapper chainWrapper = null;
+  private RequestInterceptorChainWrapper initializePipeline(String user) {
     synchronized (this.userPipelineMap) {
       if (this.userPipelineMap.containsKey(user)) {
         LOG.info("Request to start an already existing user: {}"
             + " was received, so ignoring.", user);
-        return;
+        return userPipelineMap.get(user);
       }
 
-      chainWrapper = new RequestInterceptorChainWrapper();
-      this.userPipelineMap.put(user, chainWrapper);
-    }
-
-    // We register the pipeline instance in the map first and then initialize it
-    // later because chain initialization can be expensive and we would like to
-    // release the lock as soon as possible to prevent other applications from
-    // blocking when one application's chain is initializing
-    LOG.info("Initializing request processing pipeline for application "
-        + "for the user: {}", user);
-
-    try {
-      ClientRequestInterceptor interceptorChain =
-          this.createRequestInterceptorChain();
-      interceptorChain.init(user);
-      chainWrapper.init(interceptorChain);
-    } catch (Exception e) {
-      synchronized (this.userPipelineMap) {
-        this.userPipelineMap.remove(user);
+      RequestInterceptorChainWrapper chainWrapper =
+          new RequestInterceptorChainWrapper();
+      try {
+        // We should init the pipeline instance after it is created and then
+        // add to the map, to ensure thread safe.
+        LOG.info("Initializing request processing pipeline for application "
+            + "for the user: {}", user);
+
+        ClientRequestInterceptor interceptorChain =
+            this.createRequestInterceptorChain();
+        interceptorChain.init(user);
+        chainWrapper.init(interceptorChain);
+      } catch (Exception e) {
+        LOG.error("Init ClientRequestInterceptor error for user: " + user, e);
+        throw e;
       }
-      throw e;
+
+      this.userPipelineMap.put(user, chainWrapper);
+      return chainWrapper;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9804dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java
index b8b7ad8..ef30613 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java
@@ -165,13 +165,15 @@ public class RouterRMAdminService extends AbstractService
     return interceptorClassNames;
   }
 
-  private RequestInterceptorChainWrapper getInterceptorChain()
+  @VisibleForTesting
+  protected RequestInterceptorChainWrapper getInterceptorChain()
       throws IOException {
     String user = UserGroupInformation.getCurrentUser().getUserName();
-    if (!userPipelineMap.containsKey(user)) {
-      initializePipeline(user);
+    RequestInterceptorChainWrapper chain = userPipelineMap.get(user);
+    if (chain != null && chain.getRootInterceptor() != null) {
+      return chain;
     }
-    return userPipelineMap.get(user);
+    return initializePipeline(user);
   }
 
   /**
@@ -239,35 +241,32 @@ public class RouterRMAdminService extends AbstractService
    *
    * @param user
    */
-  private void initializePipeline(String user) {
-    RequestInterceptorChainWrapper chainWrapper = null;
+  private RequestInterceptorChainWrapper initializePipeline(String user) {
     synchronized (this.userPipelineMap) {
       if (this.userPipelineMap.containsKey(user)) {
         LOG.info("Request to start an already existing user: {}"
             + " was received, so ignoring.", user);
-        return;
+        return userPipelineMap.get(user);
       }
 
-      chainWrapper = new RequestInterceptorChainWrapper();
-      this.userPipelineMap.put(user, chainWrapper);
-    }
-
-    // We register the pipeline instance in the map first and then initialize it
-    // later because chain initialization can be expensive and we would like to
-    // release the lock as soon as possible to prevent other applications from
-    // blocking when one application's chain is initializing
-    LOG.info("Initializing request processing pipeline for the user: {}", user);
-
-    try {
-      RMAdminRequestInterceptor interceptorChain =
-          this.createRequestInterceptorChain();
-      interceptorChain.init(user);
-      chainWrapper.init(interceptorChain);
-    } catch (Exception e) {
-      synchronized (this.userPipelineMap) {
-        this.userPipelineMap.remove(user);
+      RequestInterceptorChainWrapper chainWrapper =
+          new RequestInterceptorChainWrapper();
+      try {
+        // We should init the pipeline instance after it is created and then
+        // add to the map, to ensure thread safe.
+        LOG.info("Initializing request processing pipeline for user: {}", user);
+
+        RMAdminRequestInterceptor interceptorChain =
+            this.createRequestInterceptorChain();
+        interceptorChain.init(user);
+        chainWrapper.init(interceptorChain);
+      } catch (Exception e) {
+        LOG.error("Init RMAdminRequestInterceptor error for user: " + user, e);
+        throw e;
       }
-      throw e;
+
+      this.userPipelineMap.put(user, chainWrapper);
+      return chainWrapper;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9804dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
index ae57f1c..49de588 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
@@ -173,10 +173,11 @@ public class RouterWebServices implements RMWebServiceProtocol {
     } catch (IOException e) {
       LOG.error("Cannot get user: {}", e.getMessage());
     }
-    if (!userPipelineMap.containsKey(user)) {
-      initializePipeline(user);
+    RequestInterceptorChainWrapper chain = userPipelineMap.get(user);
+    if (chain != null && chain.getRootInterceptor() != null) {
+      return chain;
     }
-    return userPipelineMap.get(user);
+    return initializePipeline(user);
   }
 
   /**
@@ -242,35 +243,32 @@ public class RouterWebServices implements RMWebServiceProtocol {
    *
    * @param user
    */
-  private void initializePipeline(String user) {
-    RequestInterceptorChainWrapper chainWrapper = null;
+  private RequestInterceptorChainWrapper initializePipeline(String user) {
     synchronized (this.userPipelineMap) {
       if (this.userPipelineMap.containsKey(user)) {
         LOG.info("Request to start an already existing user: {}"
             + " was received, so ignoring.", user);
-        return;
+        return userPipelineMap.get(user);
       }
 
-      chainWrapper = new RequestInterceptorChainWrapper();
-      this.userPipelineMap.put(user, chainWrapper);
-    }
-
-    // We register the pipeline instance in the map first and then initialize it
-    // later because chain initialization can be expensive and we would like to
-    // release the lock as soon as possible to prevent other applications from
-    // blocking when one application's chain is initializing
-    LOG.info("Initializing request processing pipeline for the user: {}", user);
-
-    try {
-      RESTRequestInterceptor interceptorChain =
-          this.createRequestInterceptorChain();
-      interceptorChain.init(user);
-      chainWrapper.init(interceptorChain);
-    } catch (Exception e) {
-      synchronized (this.userPipelineMap) {
-        this.userPipelineMap.remove(user);
+      RequestInterceptorChainWrapper chainWrapper =
+          new RequestInterceptorChainWrapper();
+      try {
+        // We should init the pipeline instance after it is created and then
+        // add to the map, to ensure thread safe.
+        LOG.info("Initializing request processing pipeline for user: {}", user);
+
+        RESTRequestInterceptor interceptorChain =
+            this.createRequestInterceptorChain();
+        interceptorChain.init(user);
+        chainWrapper.init(interceptorChain);
+      } catch (Exception e) {
+        LOG.error("Init RESTRequestInterceptor error for user: " + user, e);
+        throw e;
       }
-      throw e;
+
+      this.userPipelineMap.put(user, chainWrapper);
+      return chainWrapper;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9804dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
index a9c3729..b03059d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
@@ -19,8 +19,10 @@
 package org.apache.hadoop.yarn.server.router.clientrm;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.Map;
 
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
@@ -207,4 +209,62 @@ public class TestRouterClientRMService extends BaseRouterClientRMTest {
     Assert.assertNull("test2 should have been evicted", chain);
   }
 
+  /**
+   * This test validates if the ClientRequestInterceptor chain for the user
+   * can build and init correctly when a multi-client process begins to
+   * request RouterClientRMService for the same user simultaneously.
+   */
+  @Test
+  public void testClientPipelineConcurrent() throws InterruptedException {
+    final String user = "test1";
+
+    /*
+     * ClientTestThread is a thread to simulate a client request to get a
+     * ClientRequestInterceptor for the user.
+     */
+    class ClientTestThread extends Thread {
+      private ClientRequestInterceptor interceptor;
+      @Override public void run() {
+        try {
+          interceptor = pipeline();
+        } catch (IOException | InterruptedException e) {
+          e.printStackTrace();
+        }
+      }
+      private ClientRequestInterceptor pipeline()
+          throws IOException, InterruptedException {
+        return UserGroupInformation.createRemoteUser(user).doAs(
+            new PrivilegedExceptionAction<ClientRequestInterceptor>() {
+              @Override
+              public ClientRequestInterceptor run() throws Exception {
+                RequestInterceptorChainWrapper wrapper =
+                    getRouterClientRMService().getInterceptorChain();
+                ClientRequestInterceptor interceptor =
+                    wrapper.getRootInterceptor();
+                Assert.assertNotNull(interceptor);
+                LOG.info("init client interceptor success for user " + user);
+                return interceptor;
+              }
+            });
+      }
+    }
+
+    /*
+     * We start the first thread. It should not finish initing a chainWrapper
+     * before the other thread starts. In this way, the second thread can
+     * init at the same time of the first one. In the end, we validate that
+     * the 2 threads get the same chainWrapper without going into error.
+     */
+    ClientTestThread client1 = new ClientTestThread();
+    ClientTestThread client2 = new ClientTestThread();
+    client1.start();
+    client2.start();
+    client1.join();
+    client2.join();
+
+    Assert.assertNotNull(client1.interceptor);
+    Assert.assertNotNull(client2.interceptor);
+    Assert.assertTrue(client1.interceptor == client2.interceptor);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9804dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
index 11786e6..07ef73c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
@@ -19,8 +19,10 @@
 package org.apache.hadoop.yarn.server.router.rmadmin;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.Map;
 
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
@@ -216,4 +218,62 @@ public class TestRouterRMAdminService extends BaseRouterRMAdminTest {
     Assert.assertNull("test2 should have been evicted", chain);
   }
 
+  /**
+   * This test validates if the RMAdminRequestInterceptor chain for the user
+   * can build and init correctly when a multi-client process begins to
+   * request RouterRMAdminService for the same user simultaneously.
+   */
+  @Test
+  public void testRMAdminPipelineConcurrent() throws InterruptedException {
+    final String user = "test1";
+
+    /*
+     * ClientTestThread is a thread to simulate a client request to get a
+     * RMAdminRequestInterceptor for the user.
+     */
+    class ClientTestThread extends Thread {
+      private RMAdminRequestInterceptor interceptor;
+      @Override public void run() {
+        try {
+          interceptor = pipeline();
+        } catch (IOException | InterruptedException e) {
+          e.printStackTrace();
+        }
+      }
+      private RMAdminRequestInterceptor pipeline()
+          throws IOException, InterruptedException {
+        return UserGroupInformation.createRemoteUser(user).doAs(
+            new PrivilegedExceptionAction<RMAdminRequestInterceptor>() {
+              @Override
+              public RMAdminRequestInterceptor run() throws Exception {
+                RequestInterceptorChainWrapper wrapper =
+                    getRouterRMAdminService().getInterceptorChain();
+                RMAdminRequestInterceptor interceptor =
+                    wrapper.getRootInterceptor();
+                Assert.assertNotNull(interceptor);
+                LOG.info("init rm admin interceptor success for user" + user);
+                return interceptor;
+              }
+            });
+      }
+    }
+
+    /*
+     * We start the first thread. It should not finish initing a chainWrapper
+     * before the other thread starts. In this way, the second thread can
+     * init at the same time of the first one. In the end, we validate that
+     * the 2 threads get the same chainWrapper without going into error.
+     */
+    ClientTestThread client1 = new ClientTestThread();
+    ClientTestThread client2 = new ClientTestThread();
+    client1.start();
+    client2.start();
+    client1.join();
+    client2.join();
+
+    Assert.assertNotNull(client1.interceptor);
+    Assert.assertNotNull(client2.interceptor);
+    Assert.assertTrue(client1.interceptor == client2.interceptor);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9804dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java
index c96575c..1465243 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java
@@ -19,10 +19,12 @@
 package org.apache.hadoop.yarn.server.router.webapp;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.Map;
 
 import javax.ws.rs.core.Response;
 
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo;
@@ -49,12 +51,17 @@ import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
 import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test class to validate the WebService interceptor model inside the Router.
  */
 public class TestRouterWebServices extends BaseRouterWebServicesTest {
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRouterWebServices.class);
+
   private String user = "test1";
 
   /**
@@ -266,4 +273,62 @@ public class TestRouterWebServices extends BaseRouterWebServicesTest {
     Assert.assertNull("test2 should have been evicted", chain);
   }
 
+  /**
+   * This test validates if the RESTRequestInterceptor chain for the user
+   * can build and init correctly when a multi-client process begins to
+   * request RouterWebServices for the same user simultaneously.
+   */
+  @Test
+  public void testWebPipelineConcurrent() throws InterruptedException {
+    final String user = "test1";
+
+    /*
+     * ClientTestThread is a thread to simulate a client request to get a
+     * RESTRequestInterceptor for the user.
+     */
+    class ClientTestThread extends Thread {
+      private RESTRequestInterceptor interceptor;
+      @Override public void run() {
+        try {
+          interceptor = pipeline();
+        } catch (IOException | InterruptedException e) {
+          e.printStackTrace();
+        }
+      }
+      private RESTRequestInterceptor pipeline()
+          throws IOException, InterruptedException {
+        return UserGroupInformation.createRemoteUser(user).doAs(
+            new PrivilegedExceptionAction<RESTRequestInterceptor>() {
+              @Override
+              public RESTRequestInterceptor run() throws Exception {
+                RequestInterceptorChainWrapper wrapper =
+                    getInterceptorChain(user);
+                RESTRequestInterceptor interceptor =
+                    wrapper.getRootInterceptor();
+                Assert.assertNotNull(interceptor);
+                LOG.info("init web interceptor success for user" + user);
+                return interceptor;
+              }
+            });
+      }
+    }
+
+    /*
+     * We start the first thread. It should not finish initing a chainWrapper
+     * before the other thread starts. In this way, the second thread can
+     * init at the same time of the first one. In the end, we validate that
+     * the 2 threads get the same chainWrapper without going into error.
+     */
+    ClientTestThread client1 = new ClientTestThread();
+    ClientTestThread client2 = new ClientTestThread();
+    client1.start();
+    client2.start();
+    client1.join();
+    client2.join();
+
+    Assert.assertNotNull(client1.interceptor);
+    Assert.assertNotNull(client2.interceptor);
+    Assert.assertTrue(client1.interceptor == client2.interceptor);
+  }
+
 }




[25/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java
index 54e219b..fbd6eb8 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.ozone.client.rest;
 
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 43b94a1..fc70514 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.KsmUtils;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.BucketArgs;
 import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -43,24 +43,22 @@ import org.apache.hadoop.ozone.client.io.LengthInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.ksm.protocolPB
-    .KeySpaceManagerProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.ksm.protocolPB
-    .KeySpaceManagerProtocolPB;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServicePort;
+    .OzoneManagerProtocolProtos.ServicePort;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.protocolPB.KSMPBHelper;
+import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.protocolPB
@@ -80,7 +78,7 @@ import java.util.UUID;
 import java.util.stream.Collectors;
 
 /**
- * Ozone RPC Client Implementation, it connects to KSM, SCM and DataNode
+ * Ozone RPC Client Implementation, it connects to OM, SCM and DataNode
  * to execute client calls. This uses RPC protocol for communication
  * with the servers.
  */
@@ -92,8 +90,8 @@ public class RpcClient implements ClientProtocol {
   private final OzoneConfiguration conf;
   private final StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
-  private final KeySpaceManagerProtocolClientSideTranslatorPB
-      keySpaceManagerClient;
+  private final OzoneManagerProtocolClientSideTranslatorPB
+      ozoneManagerClient;
   private final XceiverClientManager xceiverClientManager;
   private final int chunkSize;
   private final UserGroupInformation ugi;
@@ -109,20 +107,20 @@ public class RpcClient implements ClientProtocol {
     Preconditions.checkNotNull(conf);
     this.conf = new OzoneConfiguration(conf);
     this.ugi = UserGroupInformation.getCurrentUser();
-    this.userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS,
-        KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT);
-    this.groupRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS,
-        KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS_DEFAULT);
-    long ksmVersion =
-        RPC.getProtocolVersion(KeySpaceManagerProtocolPB.class);
-    InetSocketAddress ksmAddress = KsmUtils
-        .getKsmAddressForClients(conf);
-    RPC.setProtocolEngine(conf, KeySpaceManagerProtocolPB.class,
+    this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS,
+        OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT);
+    this.groupRights = conf.getEnum(OMConfigKeys.OZONE_OM_GROUP_RIGHTS,
+        OMConfigKeys.OZONE_OM_GROUP_RIGHTS_DEFAULT);
+    long omVersion =
+        RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
+    InetSocketAddress omAddress = OmUtils
+        .getOmAddressForClients(conf);
+    RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
         ProtobufRpcEngine.class);
-    this.keySpaceManagerClient =
-        new KeySpaceManagerProtocolClientSideTranslatorPB(
-            RPC.getProxy(KeySpaceManagerProtocolPB.class, ksmVersion,
-                ksmAddress, UserGroupInformation.getCurrentUser(), conf,
+    this.ozoneManagerClient =
+        new OzoneManagerProtocolClientSideTranslatorPB(
+            RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
+                omAddress, UserGroupInformation.getCurrentUser(), conf,
                 NetUtils.getDefaultSocketFactory(conf),
                 Client.getRpcTimeout(conf)));
 
@@ -155,7 +153,7 @@ public class RpcClient implements ClientProtocol {
   }
 
   private InetSocketAddress getScmAddressForClient() throws IOException {
-    List<ServiceInfo> services = keySpaceManagerClient.getServiceList();
+    List<ServiceInfo> services = ozoneManagerClient.getServiceList();
     ServiceInfo scmInfo = services.stream().filter(
         a -> a.getNodeType().equals(HddsProtos.NodeType.SCM))
         .collect(Collectors.toList()).get(0);
@@ -195,7 +193,7 @@ public class RpcClient implements ClientProtocol {
       listOfAcls.addAll(volArgs.getAcls());
     }
 
-    KsmVolumeArgs.Builder builder = KsmVolumeArgs.newBuilder();
+    OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder();
     builder.setVolume(volumeName);
     builder.setAdminName(admin);
     builder.setOwnerName(owner);
@@ -204,12 +202,12 @@ public class RpcClient implements ClientProtocol {
     //Remove duplicates and add ACLs
     for (OzoneAcl ozoneAcl :
         listOfAcls.stream().distinct().collect(Collectors.toList())) {
-      builder.addOzoneAcls(KSMPBHelper.convertOzoneAcl(ozoneAcl));
+      builder.addOzoneAcls(OMPBHelper.convertOzoneAcl(ozoneAcl));
     }
 
     LOG.info("Creating Volume: {}, with {} as owner and quota set to {} bytes.",
         volumeName, owner, quota);
-    keySpaceManagerClient.createVolume(builder.build());
+    ozoneManagerClient.createVolume(builder.build());
   }
 
   @Override
@@ -217,7 +215,7 @@ public class RpcClient implements ClientProtocol {
       throws IOException {
     HddsClientUtils.verifyResourceName(volumeName);
     Preconditions.checkNotNull(owner);
-    keySpaceManagerClient.setOwner(volumeName, owner);
+    ozoneManagerClient.setOwner(volumeName, owner);
   }
 
   @Override
@@ -226,14 +224,14 @@ public class RpcClient implements ClientProtocol {
     HddsClientUtils.verifyResourceName(volumeName);
     Preconditions.checkNotNull(quota);
     long quotaInBytes = quota.sizeInBytes();
-    keySpaceManagerClient.setQuota(volumeName, quotaInBytes);
+    ozoneManagerClient.setQuota(volumeName, quotaInBytes);
   }
 
   @Override
   public OzoneVolume getVolumeDetails(String volumeName)
       throws IOException {
     HddsClientUtils.verifyResourceName(volumeName);
-    KsmVolumeArgs volume = keySpaceManagerClient.getVolumeInfo(volumeName);
+    OmVolumeArgs volume = ozoneManagerClient.getVolumeInfo(volumeName);
     return new OzoneVolume(
         conf,
         this,
@@ -243,7 +241,7 @@ public class RpcClient implements ClientProtocol {
         volume.getQuotaInBytes(),
         volume.getCreationTime(),
         volume.getAclMap().ozoneAclGetProtobuf().stream().
-            map(KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()));
+            map(OMPBHelper::convertOzoneAcl).collect(Collectors.toList()));
   }
 
   @Override
@@ -255,14 +253,14 @@ public class RpcClient implements ClientProtocol {
   @Override
   public void deleteVolume(String volumeName) throws IOException {
     HddsClientUtils.verifyResourceName(volumeName);
-    keySpaceManagerClient.deleteVolume(volumeName);
+    ozoneManagerClient.deleteVolume(volumeName);
   }
 
   @Override
   public List<OzoneVolume> listVolumes(String volumePrefix, String prevVolume,
                                        int maxListResult)
       throws IOException {
-    List<KsmVolumeArgs> volumes = keySpaceManagerClient.listAllVolumes(
+    List<OmVolumeArgs> volumes = ozoneManagerClient.listAllVolumes(
         volumePrefix, prevVolume, maxListResult);
 
     return volumes.stream().map(volume -> new OzoneVolume(
@@ -274,7 +272,7 @@ public class RpcClient implements ClientProtocol {
         volume.getQuotaInBytes(),
         volume.getCreationTime(),
         volume.getAclMap().ozoneAclGetProtobuf().stream().
-            map(KSMPBHelper::convertOzoneAcl).collect(Collectors.toList())))
+            map(OMPBHelper::convertOzoneAcl).collect(Collectors.toList())))
         .collect(Collectors.toList());
   }
 
@@ -282,7 +280,7 @@ public class RpcClient implements ClientProtocol {
   public List<OzoneVolume> listVolumes(String user, String volumePrefix,
                                        String prevVolume, int maxListResult)
       throws IOException {
-    List<KsmVolumeArgs> volumes = keySpaceManagerClient.listVolumeByUser(
+    List<OmVolumeArgs> volumes = ozoneManagerClient.listVolumeByUser(
         user, volumePrefix, prevVolume, maxListResult);
 
     return volumes.stream().map(volume -> new OzoneVolume(
@@ -294,7 +292,7 @@ public class RpcClient implements ClientProtocol {
         volume.getQuotaInBytes(),
         volume.getCreationTime(),
         volume.getAclMap().ozoneAclGetProtobuf().stream().
-            map(KSMPBHelper::convertOzoneAcl).collect(Collectors.toList())))
+            map(OMPBHelper::convertOzoneAcl).collect(Collectors.toList())))
         .collect(Collectors.toList());
   }
 
@@ -329,7 +327,7 @@ public class RpcClient implements ClientProtocol {
       listOfAcls.addAll(bucketArgs.getAcls());
     }
 
-    KsmBucketInfo.Builder builder = KsmBucketInfo.newBuilder();
+    OmBucketInfo.Builder builder = OmBucketInfo.newBuilder();
     builder.setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setIsVersionEnabled(isVersionEnabled)
@@ -339,7 +337,7 @@ public class RpcClient implements ClientProtocol {
     LOG.info("Creating Bucket: {}/{}, with Versioning {} and " +
             "Storage Type set to {}", volumeName, bucketName, isVersionEnabled,
             storageType);
-    keySpaceManagerClient.createBucket(builder.build());
+    ozoneManagerClient.createBucket(builder.build());
   }
 
   @Override
@@ -348,11 +346,11 @@ public class RpcClient implements ClientProtocol {
       throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(addAcls);
-    KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
+    OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setAddAcls(addAcls);
-    keySpaceManagerClient.setBucketProperty(builder.build());
+    ozoneManagerClient.setBucketProperty(builder.build());
   }
 
   @Override
@@ -361,11 +359,11 @@ public class RpcClient implements ClientProtocol {
       throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(removeAcls);
-    KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
+    OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setRemoveAcls(removeAcls);
-    keySpaceManagerClient.setBucketProperty(builder.build());
+    ozoneManagerClient.setBucketProperty(builder.build());
   }
 
   @Override
@@ -374,11 +372,11 @@ public class RpcClient implements ClientProtocol {
       throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(versioning);
-    KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
+    OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setIsVersionEnabled(versioning);
-    keySpaceManagerClient.setBucketProperty(builder.build());
+    ozoneManagerClient.setBucketProperty(builder.build());
   }
 
   @Override
@@ -387,18 +385,18 @@ public class RpcClient implements ClientProtocol {
       throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(storageType);
-    KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
+    OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setStorageType(storageType);
-    keySpaceManagerClient.setBucketProperty(builder.build());
+    ozoneManagerClient.setBucketProperty(builder.build());
   }
 
   @Override
   public void deleteBucket(
       String volumeName, String bucketName) throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    keySpaceManagerClient.deleteBucket(volumeName, bucketName);
+    ozoneManagerClient.deleteBucket(volumeName, bucketName);
   }
 
   @Override
@@ -411,8 +409,8 @@ public class RpcClient implements ClientProtocol {
   public OzoneBucket getBucketDetails(
       String volumeName, String bucketName) throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    KsmBucketInfo bucketArgs =
-        keySpaceManagerClient.getBucketInfo(volumeName, bucketName);
+    OmBucketInfo bucketArgs =
+        ozoneManagerClient.getBucketInfo(volumeName, bucketName);
     return new OzoneBucket(
         conf,
         this,
@@ -428,7 +426,7 @@ public class RpcClient implements ClientProtocol {
   public List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix,
                                        String prevBucket, int maxListResult)
       throws IOException {
-    List<KsmBucketInfo> buckets = keySpaceManagerClient.listBuckets(
+    List<OmBucketInfo> buckets = ozoneManagerClient.listBuckets(
         volumeName, prevBucket, bucketPrefix, maxListResult);
 
     return buckets.stream().map(bucket -> new OzoneBucket(
@@ -451,7 +449,7 @@ public class RpcClient implements ClientProtocol {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     HddsClientUtils.checkNotNull(keyName, type, factor);
     String requestId = UUID.randomUUID().toString();
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
@@ -460,13 +458,13 @@ public class RpcClient implements ClientProtocol {
         .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue()))
         .build();
 
-    OpenKeySession openKey = keySpaceManagerClient.openKey(keyArgs);
+    OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
     ChunkGroupOutputStream groupOutputStream =
         new ChunkGroupOutputStream.Builder()
             .setHandler(openKey)
             .setXceiverClientManager(xceiverClientManager)
             .setScmClient(storageContainerLocationClient)
-            .setKsmClient(keySpaceManagerClient)
+            .setOmClient(ozoneManagerClient)
             .setChunkSize(chunkSize)
             .setRequestID(requestId)
             .setType(HddsProtos.ReplicationType.valueOf(type.toString()))
@@ -485,14 +483,14 @@ public class RpcClient implements ClientProtocol {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(keyName);
     String requestId = UUID.randomUUID().toString();
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
         .build();
-    KsmKeyInfo keyInfo = keySpaceManagerClient.lookupKey(keyArgs);
+    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
     LengthInputStream lengthInputStream =
-        ChunkGroupInputStream.getFromKsmKeyInfo(
+        ChunkGroupInputStream.getFromOmKeyInfo(
             keyInfo, xceiverClientManager, storageContainerLocationClient,
             requestId);
     return new OzoneInputStream(
@@ -505,12 +503,12 @@ public class RpcClient implements ClientProtocol {
       throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(keyName);
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
         .build();
-    keySpaceManagerClient.deleteKey(keyArgs);
+    ozoneManagerClient.deleteKey(keyArgs);
   }
 
   @Override
@@ -518,12 +516,12 @@ public class RpcClient implements ClientProtocol {
       String fromKeyName, String toKeyName) throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     HddsClientUtils.checkNotNull(fromKeyName, toKeyName);
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(fromKeyName)
         .build();
-    keySpaceManagerClient.renameKey(keyArgs, toKeyName);
+    ozoneManagerClient.renameKey(keyArgs, toKeyName);
   }
 
   @Override
@@ -531,7 +529,7 @@ public class RpcClient implements ClientProtocol {
                                  String keyPrefix, String prevKey,
                                  int maxListResult)
       throws IOException {
-    List<KsmKeyInfo> keys = keySpaceManagerClient.listKeys(
+    List<OmKeyInfo> keys = ozoneManagerClient.listKeys(
         volumeName, bucketName, prevKey, keyPrefix, maxListResult);
 
     return keys.stream().map(key -> new OzoneKey(
@@ -551,12 +549,12 @@ public class RpcClient implements ClientProtocol {
     Preconditions.checkNotNull(volumeName);
     Preconditions.checkNotNull(bucketName);
     Preconditions.checkNotNull(keyName);
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
         .build();
-    KsmKeyInfo keyInfo = keySpaceManagerClient.lookupKey(keyArgs);
+    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
     return new OzoneKey(keyInfo.getVolumeName(),
                         keyInfo.getBucketName(),
                         keyInfo.getKeyName(),
@@ -568,7 +566,7 @@ public class RpcClient implements ClientProtocol {
   @Override
   public void close() throws IOException {
     IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient);
-    IOUtils.cleanupWithLogger(LOG, keySpaceManagerClient);
+    IOUtils.cleanupWithLogger(LOG, ozoneManagerClient);
     IOUtils.cleanupWithLogger(LOG, xceiverClientManager);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
index a270f61..3aefe8a 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.client;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.junit.Rule;
 import org.junit.Test;
@@ -30,7 +30,7 @@ import org.junit.rules.Timeout;
 import java.net.InetSocketAddress;
 
 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.ozone.KsmUtils.getKsmAddress;
+import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertThat;
 
@@ -79,27 +79,27 @@ public class TestHddsClientUtils {
   }
 
   @Test
-  public void testGetKSMAddress() {
+  public void testGetOmAddress() {
     final Configuration conf = new OzoneConfiguration();
 
     // First try a client address with just a host name. Verify it falls
     // back to the default port.
-    conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, "1.2.3.4");
-    InetSocketAddress addr = getKsmAddress(conf);
+    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "1.2.3.4");
+    InetSocketAddress addr = getOmAddress(conf);
     assertThat(addr.getHostString(), is("1.2.3.4"));
-    assertThat(addr.getPort(), is(KSMConfigKeys.OZONE_KSM_PORT_DEFAULT));
+    assertThat(addr.getPort(), is(OMConfigKeys.OZONE_OM_PORT_DEFAULT));
 
     // Next try a client address with just a host name and port. Verify the port
-    // is ignored and the default KSM port is used.
-    conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, "1.2.3.4:100");
-    addr = getKsmAddress(conf);
+    // is ignored and the default OM port is used.
+    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "1.2.3.4:100");
+    addr = getOmAddress(conf);
     assertThat(addr.getHostString(), is("1.2.3.4"));
     assertThat(addr.getPort(), is(100));
 
     // Assert the we are able to use default configs if no value is specified.
-    conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, "");
-    addr = getKsmAddress(conf);
+    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "");
+    addr = getOmAddress(conf);
     assertThat(addr.getHostString(), is("0.0.0.0"));
-    assertThat(addr.getPort(), is(KSMConfigKeys.OZONE_KSM_PORT_DEFAULT));
+    assertThat(addr.getPort(), is(OMConfigKeys.OZONE_OM_PORT_DEFAULT));
   }
 }
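
A hedged, self-contained sketch of the renamed address resolution that the test
above verifies; the class names, imports and config-key constant are taken from
the diff, while the host value and the wrapper class are illustrative only:

    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.OmUtils;
    import org.apache.hadoop.ozone.om.OMConfigKeys;

    public final class OmAddressSketch {
      public static void main(String[] args) {
        Configuration conf = new OzoneConfiguration();
        // Host-only value: the resolved address falls back to the default OM port.
        conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "om.example.com");
        InetSocketAddress addr = OmUtils.getOmAddress(conf);
        System.out.println(addr.getHostString() + ":" + addr.getPort());
      }
    }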

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml
index d8581d1..83d023e 100644
--- a/hadoop-ozone/common/pom.xml
+++ b/hadoop-ozone/common/pom.xml
@@ -69,7 +69,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
               <source>
                 <directory>${basedir}/src/main/proto</directory>
                 <includes>
-                  <include>KeySpaceManagerProtocol.proto</include>
+                  <include>OzoneManagerProtocol.proto</include>
                 </includes>
               </source>
             </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/bin/ozone
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone
index 390f089..9495eff 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -38,10 +38,9 @@ function hadoop_usage
   hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
   hadoop_add_subcommand "freon" client "runs an ozone data generator"
   hadoop_add_subcommand "genesis" client "runs a collection of ozone benchmarks to help with tuning."
-  hadoop_add_subcommand "getozoneconf" client "get ozone config values from
-  configuration"
+  hadoop_add_subcommand "getozoneconf" client "get ozone config values from configuration"
   hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode."
-  hadoop_add_subcommand "ksm" daemon "Ozone keyspace manager"
+  hadoop_add_subcommand "om" daemon "Ozone Manager"
   hadoop_add_subcommand "o3" client "command line interface for ozone"
   hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata into relational data"
   hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service"
@@ -94,9 +93,9 @@ function ozonecmd_case
     getozoneconf)
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.OzoneGetConf;
     ;;
-    ksm)
+    om)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.ksm.KeySpaceManager
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.om.OzoneManager
     ;;
     oz)
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.Shell

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/bin/start-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh b/hadoop-ozone/common/src/main/bin/start-ozone.sh
index 92bc4a8..29c3674 100644
--- a/hadoop-ozone/common/src/main/bin/start-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/start-ozone.sh
@@ -179,19 +179,19 @@ if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
 fi
 
 #---------------------------------------------------------
-# Ozone keyspacemanager nodes
-KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -keyspacemanagers 2>/dev/null)
-echo "Starting key space manager nodes [${KSM_NODES}]"
-if [[ "${KSM_NODES}" == "0.0.0.0" ]]; then
-  KSM_NODES=$(hostname)
+# Ozone Manager nodes
+OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -ozonemanagers 2>/dev/null)
+echo "Starting Ozone Manager nodes [${OM_NODES}]"
+if [[ "${OM_NODES}" == "0.0.0.0" ]]; then
+  OM_NODES=$(hostname)
 fi
 
-hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/ozone" \
+hadoop_uservar_su hdfs om "${HADOOP_HDFS_HOME}/bin/ozone" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
-  --hostnames "${KSM_NODES}" \
+  --hostnames "${OM_NODES}" \
   --daemon start \
-  ksm
+  om
 
 HADOOP_JUMBO_RETCOUNTER=$?
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/bin/stop-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/stop-ozone.sh b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
index be55be4..5f5faf0 100644
--- a/hadoop-ozone/common/src/main/bin/stop-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
@@ -73,19 +73,19 @@ else
 fi
 
 #---------------------------------------------------------
-# Ozone keyspacemanager nodes
-KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -keyspacemanagers 2>/dev/null)
-echo "Stopping key space manager nodes [${KSM_NODES}]"
-if [[ "${KSM_NODES}" == "0.0.0.0" ]]; then
-  KSM_NODES=$(hostname)
+# Ozone Manager nodes
+OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -ozonemanagers 2>/dev/null)
+echo "Stopping Ozone Manager nodes [${OM_NODES}]"
+if [[ "${OM_NODES}" == "0.0.0.0" ]]; then
+  OM_NODES=$(hostname)
 fi
 
-hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/ozone" \
+hadoop_uservar_su hdfs om "${HADOOP_HDFS_HOME}/bin/ozone" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
-  --hostnames "${KSM_NODES}" \
+  --hostnames "${OM_NODES}" \
   --daemon stop \
-  ksm
+  om
 
 #---------------------------------------------------------
 # Ozone storagecontainermanager nodes

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java
deleted file mode 100644
index 1025963..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import java.net.InetSocketAddress;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.net.NetUtils;
-
-import com.google.common.base.Optional;
-import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
-import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_HTTP_BIND_PORT_DEFAULT;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_BIND_HOST_DEFAULT;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_PORT_DEFAULT;
-
-/**
- * Stateless helper functions for the server and client side of KSM
- * communication.
- */
-public final class KsmUtils {
-
-  private KsmUtils() {
-  }
-
-  /**
-   * Retrieve the socket address that is used by KSM.
-   * @param conf
-   * @return Target InetSocketAddress for the SCM service endpoint.
-   */
-  public static InetSocketAddress getKsmAddress(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        OZONE_KSM_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.or(OZONE_KSM_BIND_HOST_DEFAULT) + ":" +
-            getKsmRpcPort(conf));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to KSM.
-   * @param conf
-   * @return Target InetSocketAddress for the KSM service endpoint.
-   */
-  public static InetSocketAddress getKsmAddressForClients(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        OZONE_KSM_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      throw new IllegalArgumentException(
-          OZONE_KSM_ADDRESS_KEY + " must be defined. See" +
-              " https://wiki.apache.org/hadoop/Ozone#Configuration for" +
-              " details on configuring Ozone.");
-    }
-
-    return NetUtils.createSocketAddr(
-        host.get() + ":" + getKsmRpcPort(conf));
-  }
-
-  public static int getKsmRpcPort(Configuration conf) {
-    // If no port number is specified then we'll just try the defaultBindPort.
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        OZONE_KSM_ADDRESS_KEY);
-    return port.or(OZONE_KSM_PORT_DEFAULT);
-  }
-
-  public static int getKsmRestPort(Configuration conf) {
-    // If no port number is specified then we'll just try the default
-    // HTTP BindPort.
-    final Optional<Integer> port =
-        getPortNumberFromConfigKeys(conf, OZONE_KSM_HTTP_ADDRESS_KEY);
-    return port.or(OZONE_KSM_HTTP_BIND_PORT_DEFAULT);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
new file mode 100644
index 0000000..0974104
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone;
+
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+
+import com.google.common.base.Optional;
+import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
+import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_BIND_HOST_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_PORT_DEFAULT;
+
+/**
+ * Stateless helper functions for the server and client side of OM
+ * communication.
+ */
+public final class OmUtils {
+
+  private OmUtils() {
+  }
+
+  /**
+   * Retrieve the socket address that is used by OM.
+   * @param conf
+   * @return Target InetSocketAddress for the OM service endpoint.
+   */
+  public static InetSocketAddress getOmAddress(
+      Configuration conf) {
+    final Optional<String> host = getHostNameFromConfigKeys(conf,
+        OZONE_OM_ADDRESS_KEY);
+
+    return NetUtils.createSocketAddr(
+        host.or(OZONE_OM_BIND_HOST_DEFAULT) + ":" +
+            getOmRpcPort(conf));
+  }
+
+  /**
+   * Retrieve the socket address that should be used by clients to connect
+   * to OM.
+   * @param conf
+   * @return Target InetSocketAddress for the OM service endpoint.
+   */
+  public static InetSocketAddress getOmAddressForClients(
+      Configuration conf) {
+    final Optional<String> host = getHostNameFromConfigKeys(conf,
+        OZONE_OM_ADDRESS_KEY);
+
+    if (!host.isPresent()) {
+      throw new IllegalArgumentException(
+          OZONE_OM_ADDRESS_KEY + " must be defined. See" +
+              " https://wiki.apache.org/hadoop/Ozone#Configuration for" +
+              " details on configuring Ozone.");
+    }
+
+    return NetUtils.createSocketAddr(
+        host.get() + ":" + getOmRpcPort(conf));
+  }
+
+  public static int getOmRpcPort(Configuration conf) {
+    // If no port number is specified then we'll just try the defaultBindPort.
+    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
+        OZONE_OM_ADDRESS_KEY);
+    return port.or(OZONE_OM_PORT_DEFAULT);
+  }
+
+  public static int getOmRestPort(Configuration conf) {
+    // If no port number is specified then we'll just try the default
+    // HTTP BindPort.
+    final Optional<Integer> port =
+        getPortNumberFromConfigKeys(conf, OZONE_OM_HTTP_ADDRESS_KEY);
+    return port.or(OZONE_OM_HTTP_BIND_PORT_DEFAULT);
+  }
+}
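
As a rough usage sketch (not part of this commit), the renamed helpers above can be exercised as follows. It assumes the OM config keys referenced in OmUtils (such as OMConfigKeys.OZONE_OM_ADDRESS_KEY) are on the classpath of an Ozone client build; the class name OmAddressExample and the host names are purely illustrative.

import java.net.InetSocketAddress;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMConfigKeys;

public class OmAddressExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();

    // Host only: getOmAddress() appends the default OM RPC port.
    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "om.example.com");
    InetSocketAddress rpcAddr = OmUtils.getOmAddress(conf);
    System.out.println(rpcAddr.getHostString() + ":" + rpcAddr.getPort());

    // Host and port: the explicit port wins over the default.
    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "om.example.com:9870");
    System.out.println(OmUtils.getOmAddress(conf).getPort());
  }
}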

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
index d5f9093..ffbca6a 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.KsmUtils;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
@@ -53,8 +53,8 @@ public class OzoneGetConf extends Configured implements Tool {
     EXCLUDE_FILE("-excludeFile",
         "gets the exclude file path that defines the datanodes " +
             "that need to decommissioned."),
-    KEYSPACEMANAGER("-keyspacemanagers",
-        "gets list of ozone key space manager nodes in the cluster"),
+    OZONEMANAGER("-ozonemanagers",
+        "gets list of Ozone Manager nodes in the cluster"),
     STORAGECONTAINERMANAGER("-storagecontainermanagers",
         "gets list of ozone storage container manager nodes in the cluster"),
     CONFKEY("-confKey [key]", "gets a specific key from the configuration");
@@ -63,8 +63,8 @@ public class OzoneGetConf extends Configured implements Tool {
 
     static {
       HANDLERS = new HashMap<String, OzoneGetConf.CommandHandler>();
-      HANDLERS.put(StringUtils.toLowerCase(KEYSPACEMANAGER.getName()),
-          new KeySpaceManagersCommandHandler());
+      HANDLERS.put(StringUtils.toLowerCase(OZONEMANAGER.getName()),
+          new OzoneManagersCommandHandler());
       HANDLERS.put(StringUtils.toLowerCase(STORAGECONTAINERMANAGER.getName()),
           new StorageContainerManagersCommandHandler());
       HANDLERS.put(StringUtils.toLowerCase(CONFKEY.getName()),
@@ -245,13 +245,13 @@ public class OzoneGetConf extends Configured implements Tool {
   }
 
   /**
-   * Handler for {@link Command#KEYSPACEMANAGER}.
+   * Handler for {@link Command#OZONEMANAGER}.
    */
-  static class KeySpaceManagersCommandHandler extends CommandHandler {
+  static class OzoneManagersCommandHandler extends CommandHandler {
     @Override
     public int doWorkInternal(OzoneGetConf tool, String[] args)
         throws IOException {
-      tool.printOut(KsmUtils.getKsmAddress(tool.getConf()).getHostName());
+      tool.printOut(OmUtils.getOmAddress(tool.getConf()).getHostName());
       return 0;
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
deleted file mode 100644
index 75cf613..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-/**
- * KSM Constants.
- */
-public final class KSMConfigKeys {
-  /**
-   * Never constructed.
-   */
-  private KSMConfigKeys() {
-  }
-
-
-  public static final String OZONE_KSM_HANDLER_COUNT_KEY =
-      "ozone.ksm.handler.count.key";
-  public static final int OZONE_KSM_HANDLER_COUNT_DEFAULT = 20;
-
-  public static final String OZONE_KSM_ADDRESS_KEY =
-      "ozone.ksm.address";
-  public static final String OZONE_KSM_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-  public static final int OZONE_KSM_PORT_DEFAULT = 9862;
-
-  public static final String OZONE_KSM_HTTP_ENABLED_KEY =
-      "ozone.ksm.http.enabled";
-  public static final String OZONE_KSM_HTTP_BIND_HOST_KEY =
-      "ozone.ksm.http-bind-host";
-  public static final String OZONE_KSM_HTTPS_BIND_HOST_KEY =
-      "ozone.ksm.https-bind-host";
-  public static final String OZONE_KSM_HTTP_ADDRESS_KEY =
-      "ozone.ksm.http-address";
-  public static final String OZONE_KSM_HTTPS_ADDRESS_KEY =
-      "ozone.ksm.https-address";
-  public static final String OZONE_KSM_KEYTAB_FILE =
-      "ozone.ksm.keytab.file";
-  public static final String OZONE_KSM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
-  public static final int OZONE_KSM_HTTP_BIND_PORT_DEFAULT = 9874;
-  public static final int OZONE_KSM_HTTPS_BIND_PORT_DEFAULT = 9875;
-
-  // LevelDB cache file uses an off-heap cache in LevelDB of 128 MB.
-  public static final String OZONE_KSM_DB_CACHE_SIZE_MB =
-      "ozone.ksm.db.cache.size.mb";
-  public static final int OZONE_KSM_DB_CACHE_SIZE_DEFAULT = 128;
-
-  public static final String OZONE_KSM_USER_MAX_VOLUME =
-      "ozone.ksm.user.max.volume";
-  public static final int OZONE_KSM_USER_MAX_VOLUME_DEFAULT = 1024;
-
-  // KSM Default user/group permissions
-  public static final String OZONE_KSM_USER_RIGHTS =
-      "ozone.ksm.user.rights";
-  public static final OzoneAcl.OzoneACLRights OZONE_KSM_USER_RIGHTS_DEFAULT =
-      OzoneAcl.OzoneACLRights.READ_WRITE;
-
-  public static final String OZONE_KSM_GROUP_RIGHTS =
-      "ozone.ksm.group.rights";
-  public static final OzoneAcl.OzoneACLRights OZONE_KSM_GROUP_RIGHTS_DEFAULT =
-      OzoneAcl.OzoneACLRights.READ_WRITE;
-
-  public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
-      "ozone.key.deleting.limit.per.task";
-  public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketArgs.java
deleted file mode 100644
index 1211b50..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketArgs.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import java.util.List;
-import java.util.stream.Collectors;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.BucketArgs;
-import org.apache.hadoop.ozone.protocolPB.KSMPBHelper;
-
-/**
- * A class that encapsulates Bucket Arguments.
- */
-public final class KsmBucketArgs {
-  /**
-   * Name of the volume in which the bucket belongs to.
-   */
-  private final String volumeName;
-  /**
-   * Name of the bucket.
-   */
-  private final String bucketName;
-  /**
-   * ACL's that are to be added for the bucket.
-   */
-  private List<OzoneAcl> addAcls;
-  /**
-   * ACL's that are to be removed from the bucket.
-   */
-  private List<OzoneAcl> removeAcls;
-  /**
-   * Bucket Version flag.
-   */
-  private Boolean isVersionEnabled;
-  /**
-   * Type of storage to be used for this bucket.
-   * [RAM_DISK, SSD, DISK, ARCHIVE]
-   */
-  private StorageType storageType;
-
-  /**
-   * Private constructor, constructed via builder.
-   * @param volumeName - Volume name.
-   * @param bucketName - Bucket name.
-   * @param addAcls - ACL's to be added.
-   * @param removeAcls - ACL's to be removed.
-   * @param isVersionEnabled - Bucket version flag.
-   * @param storageType - Storage type to be used.
-   */
-  private KsmBucketArgs(String volumeName, String bucketName,
-      List<OzoneAcl> addAcls, List<OzoneAcl> removeAcls,
-      Boolean isVersionEnabled, StorageType storageType) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.addAcls = addAcls;
-    this.removeAcls = removeAcls;
-    this.isVersionEnabled = isVersionEnabled;
-    this.storageType = storageType;
-  }
-
-  /**
-   * Returns the Volume Name.
-   * @return String.
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Returns the Bucket Name.
-   * @return String
-   */
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  /**
-   * Returns the ACL's that are to be added.
-   * @return List<OzoneAclInfo>
-   */
-  public List<OzoneAcl> getAddAcls() {
-    return addAcls;
-  }
-
-  /**
-   * Returns the ACL's that are to be removed.
-   * @return List<OzoneAclInfo>
-   */
-  public List<OzoneAcl> getRemoveAcls() {
-    return removeAcls;
-  }
-
-  /**
-   * Returns true if bucket version is enabled, else false.
-   * @return isVersionEnabled
-   */
-  public Boolean getIsVersionEnabled() {
-    return isVersionEnabled;
-  }
-
-  /**
-   * Returns the type of storage to be used.
-   * @return StorageType
-   */
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  /**
-   * Returns new builder class that builds a KsmBucketArgs.
-   *
-   * @return Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder for KsmBucketArgs.
-   */
-  public static class Builder {
-    private String volumeName;
-    private String bucketName;
-    private List<OzoneAcl> addAcls;
-    private List<OzoneAcl> removeAcls;
-    private Boolean isVersionEnabled;
-    private StorageType storageType;
-
-    public Builder setVolumeName(String volume) {
-      this.volumeName = volume;
-      return this;
-    }
-
-    public Builder setBucketName(String bucket) {
-      this.bucketName = bucket;
-      return this;
-    }
-
-    public Builder setAddAcls(List<OzoneAcl> acls) {
-      this.addAcls = acls;
-      return this;
-    }
-
-    public Builder setRemoveAcls(List<OzoneAcl> acls) {
-      this.removeAcls = acls;
-      return this;
-    }
-
-    public Builder setIsVersionEnabled(Boolean versionFlag) {
-      this.isVersionEnabled = versionFlag;
-      return this;
-    }
-
-    public Builder setStorageType(StorageType storage) {
-      this.storageType = storage;
-      return this;
-    }
-
-    /**
-     * Constructs the KsmBucketArgs.
-     * @return instance of KsmBucketArgs.
-     */
-    public KsmBucketArgs build() {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      return new KsmBucketArgs(volumeName, bucketName, addAcls,
-          removeAcls, isVersionEnabled, storageType);
-    }
-  }
-
-  /**
-   * Creates BucketArgs protobuf from KsmBucketArgs.
-   */
-  public BucketArgs getProtobuf() {
-    BucketArgs.Builder builder = BucketArgs.newBuilder();
-    builder.setVolumeName(volumeName)
-        .setBucketName(bucketName);
-    if(addAcls != null && !addAcls.isEmpty()) {
-      builder.addAllAddAcls(addAcls.stream().map(
-          KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()));
-    }
-    if(removeAcls != null && !removeAcls.isEmpty()) {
-      builder.addAllRemoveAcls(removeAcls.stream().map(
-          KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()));
-    }
-    if(isVersionEnabled != null) {
-      builder.setIsVersionEnabled(isVersionEnabled);
-    }
-    if(storageType != null) {
-      builder.setStorageType(
-          PBHelperClient.convertStorageType(storageType));
-    }
-    return builder.build();
-  }
-
-  /**
-   * Parses BucketInfo protobuf and creates KsmBucketArgs.
-   * @param bucketArgs
-   * @return instance of KsmBucketArgs
-   */
-  public static KsmBucketArgs getFromProtobuf(BucketArgs bucketArgs) {
-    return new KsmBucketArgs(bucketArgs.getVolumeName(),
-        bucketArgs.getBucketName(),
-        bucketArgs.getAddAclsList().stream().map(
-            KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()),
-        bucketArgs.getRemoveAclsList().stream().map(
-            KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()),
-        bucketArgs.hasIsVersionEnabled() ?
-            bucketArgs.getIsVersionEnabled() : null,
-        bucketArgs.hasStorageType() ? PBHelperClient.convertStorageType(
-            bucketArgs.getStorageType()) : null);
-  }
-}
\ No newline at end of file
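
For context, a minimal builder-usage sketch against the KsmBucketArgs API removed above; the renamed OM-side replacement is not shown in this hunk but is assumed to keep the same builder shape, and the volume/bucket names here are placeholders.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;

public class BucketArgsExample {
  public static void main(String[] args) {
    // Volume and bucket names are required; build() enforces them with
    // Preconditions.checkNotNull. ACL lists and the version flag are optional.
    KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setIsVersionEnabled(false)
        .setStorageType(StorageType.SSD)
        .build();
    System.out.println(bucketArgs.getVolumeName() + "/"
        + bucketArgs.getBucketName());
  }
}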

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketInfo.java
deleted file mode 100644
index a49137a..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketInfo.java
+++ /dev/null
@@ -1,235 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.ozone.protocolPB.KSMPBHelper;
-
-import java.util.LinkedList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * A class that encapsulates Bucket Info.
- */
-public final class KsmBucketInfo {
-  /**
-   * Name of the volume in which the bucket belongs to.
-   */
-  private final String volumeName;
-  /**
-   * Name of the bucket.
-   */
-  private final String bucketName;
-  /**
-   * ACL Information.
-   */
-  private List<OzoneAcl> acls;
-  /**
-   * Bucket Version flag.
-   */
-  private Boolean isVersionEnabled;
-  /**
-   * Type of storage to be used for this bucket.
-   * [RAM_DISK, SSD, DISK, ARCHIVE]
-   */
-  private StorageType storageType;
-  /**
-   * Creation time of bucket.
-   */
-  private final long creationTime;
-
-  /**
-   * Private constructor, constructed via builder.
-   * @param volumeName - Volume name.
-   * @param bucketName - Bucket name.
-   * @param acls - list of ACLs.
-   * @param isVersionEnabled - Bucket version flag.
-   * @param storageType - Storage type to be used.
-   * @param creationTime - Bucket creation time.
-   */
-  private KsmBucketInfo(String volumeName, String bucketName,
-                        List<OzoneAcl> acls, boolean isVersionEnabled,
-                        StorageType storageType, long creationTime) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.acls = acls;
-    this.isVersionEnabled = isVersionEnabled;
-    this.storageType = storageType;
-    this.creationTime = creationTime;
-  }
-
-  /**
-   * Returns the Volume Name.
-   * @return String.
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Returns the Bucket Name.
-   * @return String
-   */
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  /**
-   * Returns the ACL's associated with this bucket.
-   * @return List<OzoneAcl>
-   */
-  public List<OzoneAcl> getAcls() {
-    return acls;
-  }
-
-  /**
-   * Returns true if bucket version is enabled, else false.
-   * @return isVersionEnabled
-   */
-  public boolean getIsVersionEnabled() {
-    return isVersionEnabled;
-  }
-
-  /**
-   * Returns the type of storage to be used.
-   * @return StorageType
-   */
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  /**
-   * Returns creation time.
-   *
-   * @return long
-   */
-  public long getCreationTime() {
-    return creationTime;
-  }
-
-  /**
-   * Returns new builder class that builds a KsmBucketInfo.
-   *
-   * @return Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder for KsmBucketInfo.
-   */
-  public static class Builder {
-    private String volumeName;
-    private String bucketName;
-    private List<OzoneAcl> acls;
-    private Boolean isVersionEnabled;
-    private StorageType storageType;
-    private long creationTime;
-
-    Builder() {
-      //Default values
-      this.acls = new LinkedList<>();
-      this.isVersionEnabled = false;
-      this.storageType = StorageType.DISK;
-    }
-
-    public Builder setVolumeName(String volume) {
-      this.volumeName = volume;
-      return this;
-    }
-
-    public Builder setBucketName(String bucket) {
-      this.bucketName = bucket;
-      return this;
-    }
-
-    public Builder setAcls(List<OzoneAcl> listOfAcls) {
-      this.acls = listOfAcls;
-      return this;
-    }
-
-    public Builder setIsVersionEnabled(Boolean versionFlag) {
-      this.isVersionEnabled = versionFlag;
-      return this;
-    }
-
-    public Builder setStorageType(StorageType storage) {
-      this.storageType = storage;
-      return this;
-    }
-
-    public Builder setCreationTime(long createdOn) {
-      this.creationTime = createdOn;
-      return this;
-    }
-
-    /**
-     * Constructs the KsmBucketInfo.
-     * @return instance of KsmBucketInfo.
-     */
-    public KsmBucketInfo build() {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(acls);
-      Preconditions.checkNotNull(isVersionEnabled);
-      Preconditions.checkNotNull(storageType);
-
-      return new KsmBucketInfo(volumeName, bucketName, acls,
-          isVersionEnabled, storageType, creationTime);
-    }
-  }
-
-  /**
-   * Creates BucketInfo protobuf from KsmBucketInfo.
-   */
-  public BucketInfo getProtobuf() {
-    return BucketInfo.newBuilder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .addAllAcls(acls.stream().map(
-            KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()))
-        .setIsVersionEnabled(isVersionEnabled)
-        .setStorageType(PBHelperClient.convertStorageType(
-            storageType))
-        .setCreationTime(creationTime)
-        .build();
-  }
-
-  /**
-   * Parses BucketInfo protobuf and creates KsmBucketInfo.
-   * @param bucketInfo
-   * @return instance of KsmBucketInfo
-   */
-  public static KsmBucketInfo getFromProtobuf(BucketInfo bucketInfo) {
-    return new KsmBucketInfo(
-        bucketInfo.getVolumeName(),
-        bucketInfo.getBucketName(),
-        bucketInfo.getAclsList().stream().map(
-            KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()),
-        bucketInfo.getIsVersionEnabled(),
-        PBHelperClient.convertStorageType(
-            bucketInfo.getStorageType()), bucketInfo.getCreationTime());
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java
deleted file mode 100644
index cd17e28..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-
-/**
- * Args for key. Client use this to specify key's attributes on  key creation
- * (putKey()).
- */
-public final class KsmKeyArgs {
-  private final String volumeName;
-  private final String bucketName;
-  private final String keyName;
-  private long dataSize;
-  private final ReplicationType type;
-  private final ReplicationFactor factor;
-
-  private KsmKeyArgs(String volumeName, String bucketName, String keyName,
-      long dataSize, ReplicationType type, ReplicationFactor factor) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.keyName = keyName;
-    this.dataSize = dataSize;
-    this.type = type;
-    this.factor = factor;
-  }
-
-  public ReplicationType getType() {
-    return type;
-  }
-
-  public ReplicationFactor getFactor() {
-    return factor;
-  }
-
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  public String getKeyName() {
-    return keyName;
-  }
-
-  public long getDataSize() {
-    return dataSize;
-  }
-
-  public void setDataSize(long size) {
-    dataSize = size;
-  }
-
-  /**
-   * Builder class of KsmKeyArgs.
-   */
-  public static class Builder {
-    private String volumeName;
-    private String bucketName;
-    private String keyName;
-    private long dataSize;
-    private ReplicationType type;
-    private ReplicationFactor factor;
-
-
-    public Builder setVolumeName(String volume) {
-      this.volumeName = volume;
-      return this;
-    }
-
-    public Builder setBucketName(String bucket) {
-      this.bucketName = bucket;
-      return this;
-    }
-
-    public Builder setKeyName(String key) {
-      this.keyName = key;
-      return this;
-    }
-
-    public Builder setDataSize(long size) {
-      this.dataSize = size;
-      return this;
-    }
-
-    public Builder setType(ReplicationType replicationType) {
-      this.type = replicationType;
-      return this;
-    }
-
-    public Builder setFactor(ReplicationFactor replicationFactor) {
-      this.factor = replicationFactor;
-      return this;
-    }
-
-    public KsmKeyArgs build() {
-      return new KsmKeyArgs(volumeName, bucketName, keyName, dataSize,
-          type, factor);
-    }
-  }
-}
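
Similarly, a hedged sketch of how the removed KsmKeyArgs was populated on the client side before a putKey() call; the replication enum values chosen here are only examples, and the surrounding client wiring is omitted.

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;

public class KeyArgsExample {
  public static void main(String[] args) {
    // All attributes of the key to be created are captured up front; the data
    // size can later be adjusted via setDataSize(long) on the built object.
    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setKeyName("key1")
        .setDataSize(1024)
        .setType(ReplicationType.RATIS)
        .setFactor(ReplicationFactor.THREE)
        .build();
    System.out.println(keyArgs.getKeyName() + " -> " + keyArgs.getDataSize());
  }
}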

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyInfo.java
deleted file mode 100644
index 5d6e633..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyInfo.java
+++ /dev/null
@@ -1,277 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyInfo;
-import org.apache.hadoop.util.Time;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * Args for key block. The block instance for the key requested in putKey.
- * This is returned from KSM to client, and client use class to talk to
- * datanode. Also, this is the metadata written to ksm.db on server side.
- */
-public final class KsmKeyInfo {
-  private final String volumeName;
-  private final String bucketName;
-  // name of key client specified
-  private String keyName;
-  private long dataSize;
-  private List<KsmKeyLocationInfoGroup> keyLocationVersions;
-  private final long creationTime;
-  private long modificationTime;
-  private HddsProtos.ReplicationType type;
-  private HddsProtos.ReplicationFactor factor;
-
-  private KsmKeyInfo(String volumeName, String bucketName, String keyName,
-      List<KsmKeyLocationInfoGroup> versions, long dataSize,
-      long creationTime, long modificationTime, HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.keyName = keyName;
-    this.dataSize = dataSize;
-    // it is important that the versions are ordered from old to new.
-    // Do this sanity check when versions got loaded on creating KsmKeyInfo.
-    // TODO : this is not necessary, here only because versioning is still a
-    // work in-progress, remove this following check when versioning is
-    // complete and prove correctly functioning
-    long currentVersion = -1;
-    for (KsmKeyLocationInfoGroup version : versions) {
-      Preconditions.checkArgument(
-            currentVersion + 1 == version.getVersion());
-      currentVersion = version.getVersion();
-    }
-    this.keyLocationVersions = versions;
-    this.creationTime = creationTime;
-    this.modificationTime = modificationTime;
-    this.factor = factor;
-    this.type = type;
-  }
-
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  public HddsProtos.ReplicationType getType() {
-    return type;
-  }
-
-  public HddsProtos.ReplicationFactor getFactor() {
-    return factor;
-  }
-
-  public String getKeyName() {
-    return keyName;
-  }
-
-  public void setKeyName(String keyName) {
-    this.keyName = keyName;
-  }
-
-  public long getDataSize() {
-    return dataSize;
-  }
-
-  public void setDataSize(long size) {
-    this.dataSize = size;
-  }
-
-  public synchronized KsmKeyLocationInfoGroup getLatestVersionLocations()
-      throws IOException {
-    return keyLocationVersions.size() == 0? null :
-        keyLocationVersions.get(keyLocationVersions.size() - 1);
-  }
-
-  public List<KsmKeyLocationInfoGroup> getKeyLocationVersions() {
-    return keyLocationVersions;
-  }
-
-  public void updateModifcationTime() {
-    this.modificationTime = Time.monotonicNow();
-  }
-
-  /**
-   * Append a set of blocks to the latest version. Note that these blocks are
-   * part of the latest version, not a new version.
-   *
-   * @param newLocationList the list of new blocks to be added.
-   * @throws IOException
-   */
-  public synchronized void appendNewBlocks(
-      List<KsmKeyLocationInfo> newLocationList) throws IOException {
-    if (keyLocationVersions.size() == 0) {
-      throw new IOException("Appending new block, but no version exist");
-    }
-    KsmKeyLocationInfoGroup currentLatestVersion =
-        keyLocationVersions.get(keyLocationVersions.size() - 1);
-    currentLatestVersion.appendNewBlocks(newLocationList);
-    setModificationTime(Time.now());
-  }
-
-  /**
-   * Add a new set of blocks. The new blocks will be added as appending a new
-   * version to the all version list.
-   *
-   * @param newLocationList the list of new blocks to be added.
-   * @throws IOException
-   */
-  public synchronized long addNewVersion(
-      List<KsmKeyLocationInfo> newLocationList) throws IOException {
-    long latestVersionNum;
-    if (keyLocationVersions.size() == 0) {
-      // no version exist, these blocks are the very first version.
-      keyLocationVersions.add(new KsmKeyLocationInfoGroup(0, newLocationList));
-      latestVersionNum = 0;
-    } else {
-      // it is important that the new version are always at the tail of the list
-      KsmKeyLocationInfoGroup currentLatestVersion =
-          keyLocationVersions.get(keyLocationVersions.size() - 1);
-      // the new version is created based on the current latest version
-      KsmKeyLocationInfoGroup newVersion =
-          currentLatestVersion.generateNextVersion(newLocationList);
-      keyLocationVersions.add(newVersion);
-      latestVersionNum = newVersion.getVersion();
-    }
-    setModificationTime(Time.now());
-    return latestVersionNum;
-  }
-
-  public long getCreationTime() {
-    return creationTime;
-  }
-
-  public long getModificationTime() {
-    return modificationTime;
-  }
-
-  public void setModificationTime(long modificationTime) {
-    this.modificationTime = modificationTime;
-  }
-
-  /**
-   * Builder of KsmKeyInfo.
-   */
-  public static class Builder {
-    private String volumeName;
-    private String bucketName;
-    private String keyName;
-    private long dataSize;
-    private List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoGroups;
-    private long creationTime;
-    private long modificationTime;
-    private HddsProtos.ReplicationType type;
-    private HddsProtos.ReplicationFactor factor;
-
-    public Builder setVolumeName(String volume) {
-      this.volumeName = volume;
-      return this;
-    }
-
-    public Builder setBucketName(String bucket) {
-      this.bucketName = bucket;
-      return this;
-    }
-
-    public Builder setKeyName(String key) {
-      this.keyName = key;
-      return this;
-    }
-
-    public Builder setKsmKeyLocationInfos(
-        List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoList) {
-      this.ksmKeyLocationInfoGroups = ksmKeyLocationInfoList;
-      return this;
-    }
-
-    public Builder setDataSize(long size) {
-      this.dataSize = size;
-      return this;
-    }
-
-    public Builder setCreationTime(long crTime) {
-      this.creationTime = crTime;
-      return this;
-    }
-
-    public Builder setModificationTime(long mTime) {
-      this.modificationTime = mTime;
-      return this;
-    }
-
-    public Builder setReplicationFactor(HddsProtos.ReplicationFactor factor) {
-      this.factor = factor;
-      return this;
-    }
-
-    public Builder setReplicationType(HddsProtos.ReplicationType type) {
-      this.type = type;
-      return this;
-    }
-
-    public KsmKeyInfo build() {
-      return new KsmKeyInfo(
-          volumeName, bucketName, keyName, ksmKeyLocationInfoGroups,
-          dataSize, creationTime, modificationTime, type, factor);
-    }
-  }
-
-  public KeyInfo getProtobuf() {
-    long latestVersion = keyLocationVersions.size() == 0 ? -1 :
-        keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion();
-    return KeyInfo.newBuilder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setDataSize(dataSize)
-        .setFactor(factor)
-        .setType(type)
-        .addAllKeyLocationList(keyLocationVersions.stream()
-            .map(KsmKeyLocationInfoGroup::getProtobuf)
-            .collect(Collectors.toList()))
-        .setLatestVersion(latestVersion)
-        .setCreationTime(creationTime)
-        .setModificationTime(modificationTime)
-        .build();
-  }
-
-  public static KsmKeyInfo getFromProtobuf(KeyInfo keyInfo) {
-    return new KsmKeyInfo(
-        keyInfo.getVolumeName(),
-        keyInfo.getBucketName(),
-        keyInfo.getKeyName(),
-        keyInfo.getKeyLocationListList().stream()
-            .map(KsmKeyLocationInfoGroup::getFromProtobuf)
-            .collect(Collectors.toList()),
-        keyInfo.getDataSize(),
-        keyInfo.getCreationTime(),
-        keyInfo.getModificationTime(),
-        keyInfo.getType(),
-        keyInfo.getFactor());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java
deleted file mode 100644
index 45feda0..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyLocation;
-
-/**
- * One key can be too huge to fit in one container. In which case it gets split
- * into a number of subkeys. This class represents one such subkey instance.
- */
-public final class KsmKeyLocationInfo {
-  private final BlockID blockID;
-  private final boolean shouldCreateContainer;
-  // the id of this subkey in all the subkeys.
-  private final long length;
-  private final long offset;
-  // the version number indicating when this block was added
-  private long createVersion;
-
-  private KsmKeyLocationInfo(BlockID blockID, boolean shouldCreateContainer,
-      long length, long offset) {
-    this.blockID = blockID;
-    this.shouldCreateContainer = shouldCreateContainer;
-    this.length = length;
-    this.offset = offset;
-  }
-
-  public void setCreateVersion(long version) {
-    createVersion = version;
-  }
-
-  public long getCreateVersion() {
-    return createVersion;
-  }
-
-  public BlockID getBlockID() {
-    return blockID;
-  }
-
-  public long getContainerID() {
-    return blockID.getContainerID();
-  }
-
-  public long getLocalID() {
-    return blockID.getLocalID();
-  }
-
-  public boolean getShouldCreateContainer() {
-    return shouldCreateContainer;
-  }
-
-  public long getLength() {
-    return length;
-  }
-
-  public long getOffset() {
-    return offset;
-  }
-
-  /**
-   * Builder of KsmKeyLocationInfo.
-   */
-  public static class Builder {
-    private BlockID blockID;
-    private boolean shouldCreateContainer;
-    private long length;
-    private long offset;
-
-    public Builder setBlockID(BlockID blockId) {
-      this.blockID = blockId;
-      return this;
-    }
-
-    public Builder setShouldCreateContainer(boolean create) {
-      this.shouldCreateContainer = create;
-      return this;
-    }
-
-    public Builder setLength(long len) {
-      this.length = len;
-      return this;
-    }
-
-    public Builder setOffset(long off) {
-      this.offset = off;
-      return this;
-    }
-
-    public KsmKeyLocationInfo build() {
-      return new KsmKeyLocationInfo(blockID,
-          shouldCreateContainer, length, offset);
-    }
-  }
-
-  public KeyLocation getProtobuf() {
-    return KeyLocation.newBuilder()
-        .setBlockID(blockID.getProtobuf())
-        .setShouldCreateContainer(shouldCreateContainer)
-        .setLength(length)
-        .setOffset(offset)
-        .setCreateVersion(createVersion)
-        .build();
-  }
-
-  public static KsmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) {
-    KsmKeyLocationInfo info = new KsmKeyLocationInfo(
-        BlockID.getFromProtobuf(keyLocation.getBlockID()),
-        keyLocation.getShouldCreateContainer(),
-        keyLocation.getLength(),
-        keyLocation.getOffset());
-    info.setCreateVersion(keyLocation.getCreateVersion());
-    return info;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java
deleted file mode 100644
index 0facf3c..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyLocationList;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * A list of key locations. This class represents one single version of the
- * blocks of a key.
- */
-public class KsmKeyLocationInfoGroup {
-  private final long version;
-  private final List<KsmKeyLocationInfo> locationList;
-
-  public KsmKeyLocationInfoGroup(long version,
-      List<KsmKeyLocationInfo> locations) {
-    this.version = version;
-    this.locationList = locations;
-  }
-
-  /**
-   * Return only the blocks that are created in the most recent version.
-   *
-   * @return the list of blocks that are created in the latest version.
-   */
-  public List<KsmKeyLocationInfo> getBlocksLatestVersionOnly() {
-    List<KsmKeyLocationInfo> list = new ArrayList<>();
-    locationList.stream().filter(x -> x.getCreateVersion() == version)
-        .forEach(list::add);
-    return list;
-  }
-
-  public long getVersion() {
-    return version;
-  }
-
-  public List<KsmKeyLocationInfo> getLocationList() {
-    return locationList;
-  }
-
-  public KeyLocationList getProtobuf() {
-    return KeyLocationList.newBuilder()
-        .setVersion(version)
-        .addAllKeyLocations(
-            locationList.stream().map(KsmKeyLocationInfo::getProtobuf)
-                .collect(Collectors.toList()))
-        .build();
-  }
-
-  public static KsmKeyLocationInfoGroup getFromProtobuf(
-      KeyLocationList keyLocationList) {
-    return new KsmKeyLocationInfoGroup(
-        keyLocationList.getVersion(),
-        keyLocationList.getKeyLocationsList().stream()
-            .map(KsmKeyLocationInfo::getFromProtobuf)
-            .collect(Collectors.toList()));
-  }
-
-  /**
-   * Given a new block location, generate a new version list based upon this
-   * one.
-   *
-   * @param newLocationList a list of new location to be added.
-   * @return
-   */
-  KsmKeyLocationInfoGroup generateNextVersion(
-      List<KsmKeyLocationInfo> newLocationList) throws IOException {
-    // TODO : revisit if we can do this method more efficiently
-    // one potential inefficiency here is that later version always include
-    // older ones. e.g. v1 has B1, then v2, v3...will all have B1 and only add
-    // more
-    List<KsmKeyLocationInfo> newList = new ArrayList<>();
-    newList.addAll(locationList);
-    for (KsmKeyLocationInfo newInfo : newLocationList) {
-      // all these new blocks will have addVersion of current version + 1
-      newInfo.setCreateVersion(version + 1);
-      newList.add(newInfo);
-    }
-    return new KsmKeyLocationInfoGroup(version + 1, newList);
-  }
-
-  void appendNewBlocks(List<KsmKeyLocationInfo> newLocationList)
-      throws IOException {
-    for (KsmKeyLocationInfo info : newLocationList) {
-      info.setCreateVersion(version);
-      locationList.add(info);
-    }
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("version:").append(version).append(" ");
-    for (KsmKeyLocationInfo kli : locationList) {
-      sb.append(kli.getLocalID()).append(" || ");
-    }
-    return sb.toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmOzoneAclMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmOzoneAclMap.java
deleted file mode 100644
index 7d9efad..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmOzoneAclMap.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo.OzoneAclRights;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo.OzoneAclType;
-
-import java.util.List;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.ArrayList;
-import java.util.HashMap;
-
-/**
- * This helper class keeps a map of all user and their permissions.
- */
-public class KsmOzoneAclMap {
-  // per Acl Type user:rights map
-  private ArrayList<Map<String, OzoneAclRights>> aclMaps;
-
-  KsmOzoneAclMap() {
-    aclMaps = new ArrayList<>();
-    for (OzoneAclType aclType : OzoneAclType.values()) {
-      aclMaps.add(aclType.ordinal(), new HashMap<>());
-    }
-  }
-
-  private Map<String, OzoneAclRights> getMap(OzoneAclType type) {
-    return aclMaps.get(type.ordinal());
-  }
-
-  // For a given acl type and user, get the stored acl
-  private OzoneAclRights getAcl(OzoneAclType type, String user) {
-    return getMap(type).get(user);
-  }
-
-  // Add a new acl to the map
-  public void addAcl(OzoneAclInfo acl) {
-    getMap(acl.getType()).put(acl.getName(), acl.getRights());
-  }
-
-  // for a given acl, check if the user has access rights
-  public boolean hasAccess(OzoneAclInfo acl) {
-    OzoneAclRights storedRights = getAcl(acl.getType(), acl.getName());
-    if (storedRights != null) {
-      switch (acl.getRights()) {
-      case READ:
-        return (storedRights == OzoneAclRights.READ)
-            || (storedRights == OzoneAclRights.READ_WRITE);
-      case WRITE:
-        return (storedRights == OzoneAclRights.WRITE)
-            || (storedRights == OzoneAclRights.READ_WRITE);
-      case READ_WRITE:
-        return (storedRights == OzoneAclRights.READ_WRITE);
-      default:
-        return false;
-      }
-    } else {
-      return false;
-    }
-  }
-
-  // Convert this map to OzoneAclInfo Protobuf List
-  public List<OzoneAclInfo> ozoneAclGetProtobuf() {
-    List<OzoneAclInfo> aclList = new LinkedList<>();
-    for (OzoneAclType type: OzoneAclType.values()) {
-      for (Map.Entry<String, OzoneAclRights> entry :
-          aclMaps.get(type.ordinal()).entrySet()) {
-        OzoneAclInfo aclInfo = OzoneAclInfo.newBuilder()
-            .setName(entry.getKey())
-            .setType(type)
-            .setRights(entry.getValue())
-            .build();
-        aclList.add(aclInfo);
-      }
-    }
-
-    return aclList;
-  }
-
-  // Create map from list of OzoneAclInfos
-  public static KsmOzoneAclMap ozoneAclGetFromProtobuf(
-      List<OzoneAclInfo> aclList) {
-    KsmOzoneAclMap aclMap = new KsmOzoneAclMap();
-    for (OzoneAclInfo acl : aclList) {
-      aclMap.addAcl(acl);
-    }
-    return aclMap;
-  }
-}




[12/50] [abbrv] hadoop git commit: YARN-8492. ATSv2 HBase tests are failing with ClassNotFoundException. Contributed by Rohith Sharma K S.

Posted by bh...@apache.org.
YARN-8492. ATSv2 HBase tests are failing with ClassNotFoundException. Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4bf38cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4bf38cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4bf38cf

Branch: refs/heads/HDDS-48
Commit: e4bf38cf50943565796c00f8b5711a2882813488
Parents: 498e3bf
Author: Sunil G <su...@apache.org>
Authored: Fri Jul 6 12:05:32 2018 -0700
Committer: Sunil G <su...@apache.org>
Committed: Fri Jul 6 12:05:32 2018 -0700

----------------------------------------------------------------------
 .../pom.xml                                               | 10 ++++++++++
 1 file changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4bf38cf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
index 8c143d3..05a5c65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
@@ -133,6 +133,10 @@
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-auth</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs-client</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
 
@@ -355,6 +359,12 @@
       <artifactId>jetty-webapp</artifactId>
       <scope>test</scope>
     </dependency>
+
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>




[41/50] [abbrv] hadoop git commit: YARN-7899. [AMRMProxy] Stateful FederationInterceptor for pending requests. Contributed by Botong Huang.

Posted by bh...@apache.org.
YARN-7899. [AMRMProxy] Stateful FederationInterceptor for pending requests. Contributed by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea9b6082
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea9b6082
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea9b6082

Branch: refs/heads/HDDS-48
Commit: ea9b608237e7f2cf9b1e36b0f78c9674ec84096f
Parents: e12d93b
Author: Giovanni Matteo Fumarola <gi...@apache.com>
Authored: Mon Jul 9 12:27:36 2018 -0700
Committer: Giovanni Matteo Fumarola <gi...@apache.com>
Committed: Mon Jul 9 12:27:36 2018 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/client/AMRMClientUtils.java     |  91 ------------
 .../hadoop/yarn/server/AMRMClientRelayer.java   |   9 +-
 .../yarn/server/uam/UnmanagedAMPoolManager.java |  16 ++
 .../server/uam/UnmanagedApplicationManager.java |  40 ++---
 .../yarn/server/MockResourceManagerFacade.java  |  13 +-
 .../amrmproxy/FederationInterceptor.java        | 146 ++++++++++++++++---
 .../amrmproxy/BaseAMRMProxyTest.java            |   2 +
 .../amrmproxy/TestFederationInterceptor.java    |  17 +++
 8 files changed, 192 insertions(+), 142 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea9b6082/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
index 387e399..5d4ab4a6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
@@ -36,19 +36,9 @@ import org.apache.hadoop.security.SaslRpcServer;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
-import org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException;
-import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -68,87 +58,6 @@ public final class AMRMClientUtils {
   }
 
   /**
-   * Handle ApplicationNotRegistered exception and re-register.
-   *
-   * @param appId application Id
-   * @param rmProxy RM proxy instance
-   * @param registerRequest the AM re-register request
-   * @throws YarnException if re-register fails
-   */
-  public static void handleNotRegisteredExceptionAndReRegister(
-      ApplicationId appId, ApplicationMasterProtocol rmProxy,
-      RegisterApplicationMasterRequest registerRequest) throws YarnException {
-    LOG.info("App attempt {} not registered, most likely due to RM failover. "
-        + " Trying to re-register.", appId);
-    try {
-      rmProxy.registerApplicationMaster(registerRequest);
-    } catch (Exception e) {
-      if (e instanceof InvalidApplicationMasterRequestException
-          && e.getMessage().contains(APP_ALREADY_REGISTERED_MESSAGE)) {
-        LOG.info("Concurrent thread successfully registered, moving on.");
-      } else {
-        LOG.error("Error trying to re-register AM", e);
-        throw new YarnException(e);
-      }
-    }
-  }
-
-  /**
-   * Helper method for client calling ApplicationMasterProtocol.allocate that
-   * handles re-register if RM fails over.
-   *
-   * @param request allocate request
-   * @param rmProxy RM proxy
-   * @param registerRequest the register request for re-register
-   * @param appId application id
-   * @return allocate response
-   * @throws YarnException if RM call fails
-   * @throws IOException if RM call fails
-   */
-  public static AllocateResponse allocateWithReRegister(AllocateRequest request,
-      ApplicationMasterProtocol rmProxy,
-      RegisterApplicationMasterRequest registerRequest, ApplicationId appId)
-      throws YarnException, IOException {
-    try {
-      return rmProxy.allocate(request);
-    } catch (ApplicationMasterNotRegisteredException e) {
-      handleNotRegisteredExceptionAndReRegister(appId, rmProxy,
-          registerRequest);
-      // reset responseId after re-register
-      request.setResponseId(0);
-      // retry allocate
-      return allocateWithReRegister(request, rmProxy, registerRequest, appId);
-    }
-  }
-
-  /**
-   * Helper method for client calling
-   * ApplicationMasterProtocol.finishApplicationMaster that handles re-register
-   * if RM fails over.
-   *
-   * @param request finishApplicationMaster request
-   * @param rmProxy RM proxy
-   * @param registerRequest the register request for re-register
-   * @param appId application id
-   * @return finishApplicationMaster response
-   * @throws YarnException if RM call fails
-   * @throws IOException if RM call fails
-   */
-  public static FinishApplicationMasterResponse finishAMWithReRegister(
-      FinishApplicationMasterRequest request, ApplicationMasterProtocol rmProxy,
-      RegisterApplicationMasterRequest registerRequest, ApplicationId appId)
-      throws YarnException, IOException {
-    try {
-      return rmProxy.finishApplicationMaster(request);
-    } catch (ApplicationMasterNotRegisteredException ex) {
-      handleNotRegisteredExceptionAndReRegister(appId, rmProxy,
-          registerRequest);
-      // retry finishAM after re-register
-      return finishAMWithReRegister(request, rmProxy, registerRequest, appId);
-    }
-  }
-
-  /**
    * Create a proxy for the specified protocol.
    *
    * @param configuration Configuration to generate {@link ClientRMProxy}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea9b6082/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
index e8a7f64..0d1a27e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
@@ -147,6 +147,11 @@ public class AMRMClientRelayer extends AbstractService
     super.serviceStop();
   }
 
+  public void setAMRegistrationRequest(
+      RegisterApplicationMasterRequest registerRequest) {
+    this.amRegistrationRequest = registerRequest;
+  }
+
   @Override
   public RegisterApplicationMasterResponse registerApplicationMaster(
       RegisterApplicationMasterRequest request)
@@ -259,8 +264,10 @@ public class AMRMClientRelayer extends AbstractService
           }
         }
 
-        // re register with RM, then retry allocate recursively
+        // re-register with RM, then retry allocate recursively
         registerApplicationMaster(this.amRegistrationRequest);
+        // Reset responseId after re-register
+        allocateRequest.setResponseId(0);
         return allocate(allocateRequest);
       }
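
The hunk above moves the re-register-then-retry behaviour into the relayer itself (the AMRMClientUtils helpers removed earlier in this commit did the same job). A minimal sketch of that pattern, assuming a plain ApplicationMasterProtocol proxy and a register request saved at registration time; rmClient and savedRegisterRequest are illustrative names, not the relayer's actual fields:

import java.io.IOException;

import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
import org.apache.hadoop.yarn.exceptions.YarnException;

/** Sketch of the re-register-then-retry pattern used on RM restart/fail-over. */
class ReRegisterRetrySketch {
  private final ApplicationMasterProtocol rmClient;                    // stand-in for the RM proxy
  private final RegisterApplicationMasterRequest savedRegisterRequest; // captured at registration

  ReRegisterRetrySketch(ApplicationMasterProtocol rmClient,
      RegisterApplicationMasterRequest savedRegisterRequest) {
    this.rmClient = rmClient;
    this.savedRegisterRequest = savedRegisterRequest;
  }

  AllocateResponse allocateWithRetry(AllocateRequest request)
      throws YarnException, IOException {
    try {
      return rmClient.allocate(request);
    } catch (ApplicationMasterNotRegisteredException e) {
      // RM restarted or failed over: re-register with the saved request,
      // reset the responseId handshake, then retry the heartbeat.
      rmClient.registerApplicationMaster(savedRegisterRequest);
      request.setResponseId(0);
      return allocateWithRetry(request);
    }
  }
}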
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea9b6082/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java
index 02eef29..5f9d81b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.client.AMRMClientUtils;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.apache.hadoop.yarn.server.AMRMClientRelayer;
 import org.apache.hadoop.yarn.util.AsyncCallback;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -385,4 +386,19 @@ public class UnmanagedAMPoolManager extends AbstractService {
     return this.unmanagedAppMasterMap.containsKey(uamId);
   }
 
+  /**
+   * Return the rmProxy relayer of an UAM.
+   *
+   * @param uamId uam Id
+   * @return the rmProxy relayer
+   * @throws YarnException if fails
+   */
+  public AMRMClientRelayer getAMRMClientRelayer(String uamId)
+      throws YarnException {
+    if (!this.unmanagedAppMasterMap.containsKey(uamId)) {
+      throw new YarnException("UAM " + uamId + " does not exist");
+    }
+    return this.unmanagedAppMasterMap.get(uamId).getAMRMClientRelayer();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea9b6082/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
index 73795dc..856a818 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.apache.hadoop.yarn.server.AMRMClientRelayer;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
 import org.apache.hadoop.yarn.util.AsyncCallback;
@@ -90,7 +91,7 @@ public class UnmanagedApplicationManager {
 
   private BlockingQueue<AsyncAllocateRequestInfo> requestQueue;
   private AMRequestHandlerThread handlerThread;
-  private ApplicationMasterProtocol rmProxy;
+  private AMRMClientRelayer rmProxyRelayer;
   private ApplicationId applicationId;
   private String submitter;
   private String appNameSuffix;
@@ -138,7 +139,7 @@ public class UnmanagedApplicationManager {
     this.appNameSuffix = appNameSuffix;
     this.handlerThread = new AMRequestHandlerThread();
     this.requestQueue = new LinkedBlockingQueue<>();
-    this.rmProxy = null;
+    this.rmProxyRelayer = null;
     this.connectionInitiated = false;
     this.registerRequest = null;
     this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
@@ -190,8 +191,9 @@ public class UnmanagedApplicationManager {
       throws IOException {
     this.userUgi = UserGroupInformation.createProxyUser(
         this.applicationId.toString(), UserGroupInformation.getCurrentUser());
-    this.rmProxy = createRMProxy(ApplicationMasterProtocol.class, this.conf,
-        this.userUgi, amrmToken);
+    this.rmProxyRelayer =
+        new AMRMClientRelayer(createRMProxy(ApplicationMasterProtocol.class,
+            this.conf, this.userUgi, amrmToken));
   }
 
   /**
@@ -209,19 +211,18 @@ public class UnmanagedApplicationManager {
     // Save the register request for re-register later
     this.registerRequest = request;
 
-    // Since we have setKeepContainersAcrossApplicationAttempts = true for UAM.
-    // We do not expect application already registered exception here
     LOG.info("Registering the Unmanaged application master {}",
         this.applicationId);
     RegisterApplicationMasterResponse response =
-        this.rmProxy.registerApplicationMaster(this.registerRequest);
+        this.rmProxyRelayer.registerApplicationMaster(this.registerRequest);
+    this.lastResponseId = 0;
 
     for (Container container : response.getContainersFromPreviousAttempts()) {
-      LOG.info("RegisterUAM returned existing running container "
+      LOG.debug("RegisterUAM returned existing running container "
           + container.getId());
     }
     for (NMToken nmToken : response.getNMTokensFromPreviousAttempts()) {
-      LOG.info("RegisterUAM returned existing NM token for node "
+      LOG.debug("RegisterUAM returned existing NM token for node "
           + nmToken.getNodeId());
     }
 
@@ -249,7 +250,7 @@ public class UnmanagedApplicationManager {
 
     this.handlerThread.shutdown();
 
-    if (this.rmProxy == null) {
+    if (this.rmProxyRelayer == null) {
       if (this.connectionInitiated) {
         // This is possible if the async launchUAM is still
         // blocked and retrying. Return a dummy response in this case.
@@ -261,8 +262,7 @@ public class UnmanagedApplicationManager {
             + "be called before createAndRegister");
       }
     }
-    return AMRMClientUtils.finishAMWithReRegister(request, this.rmProxy,
-        this.registerRequest, this.applicationId);
+    return this.rmProxyRelayer.finishApplicationMaster(request);
   }
 
   /**
@@ -308,7 +308,7 @@ public class UnmanagedApplicationManager {
     //
     // In case 2, we have already save the allocate request above, so if the
     // registration succeed later, no request is lost.
-    if (this.rmProxy == null) {
+    if (this.rmProxyRelayer == null) {
       if (this.connectionInitiated) {
         LOG.info("Unmanaged AM still not successfully launched/registered yet."
             + " Saving the allocate request and send later.");
@@ -329,6 +329,15 @@ public class UnmanagedApplicationManager {
   }
 
   /**
+   * Returns the rmProxy relayer of this UAM.
+   *
+   * @return rmProxy relayer of the UAM
+   */
+  public AMRMClientRelayer getAMRMClientRelayer() {
+    return this.rmProxyRelayer;
+  }
+
+  /**
    * Returns RM proxy for the specified protocol type. Unit test cases can
    * override this method and return mock proxy instances.
    *
@@ -592,10 +601,7 @@ public class UnmanagedApplicationManager {
           }
 
           request.setResponseId(lastResponseId);
-
-          AllocateResponse response = AMRMClientUtils.allocateWithReRegister(
-              request, rmProxy, registerRequest, applicationId);
-
+          AllocateResponse response = rmProxyRelayer.allocate(request);
           if (response == null) {
             throw new YarnException("Null allocateResponse from allocate");
           }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea9b6082/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
index 23cd3e2..9b4d91d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
@@ -251,8 +251,6 @@ public class MockResourceManagerFacade implements ApplicationClientProtocol,
     ApplicationAttemptId attemptId = getAppIdentifier();
     LOG.info("Registering application attempt: " + attemptId);
 
-    shouldReRegisterNext = false;
-
     List<Container> containersFromPreviousAttempt = null;
 
     synchronized (applicationContainerIdMap) {
@@ -266,7 +264,7 @@ public class MockResourceManagerFacade implements ApplicationClientProtocol,
             containersFromPreviousAttempt.add(Container.newInstance(containerId,
                 null, null, null, null, null));
           }
-        } else {
+        } else if (!shouldReRegisterNext) {
           throw new InvalidApplicationMasterRequestException(
               AMRMClientUtils.APP_ALREADY_REGISTERED_MESSAGE);
         }
@@ -276,6 +274,8 @@ public class MockResourceManagerFacade implements ApplicationClientProtocol,
       }
     }
 
+    shouldReRegisterNext = false;
+
     // Make sure we wait for certain test cases last in the method
     synchronized (syncObj) {
       syncObj.notifyAll();
@@ -339,13 +339,6 @@ public class MockResourceManagerFacade implements ApplicationClientProtocol,
 
     validateRunning();
 
-    if (request.getAskList() != null && request.getAskList().size() > 0
-        && request.getReleaseList() != null
-        && request.getReleaseList().size() > 0) {
-      Assert.fail("The mock RM implementation does not support receiving "
-          + "askList and releaseList in the same heartbeat");
-    }
-
     ApplicationAttemptId attemptId = getAppIdentifier();
     LOG.info("Allocate from application attempt: " + attemptId);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea9b6082/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
index 5740749..645e47e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
@@ -62,14 +62,15 @@ import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.StrictPreemptionContract;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
-import org.apache.hadoop.yarn.client.AMRMClientUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
 import org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterResponseProto;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.apache.hadoop.yarn.server.AMRMClientRelayer;
 import org.apache.hadoop.yarn.server.federation.failover.FederationProxyProviderUtil;
 import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils;
 import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy;
@@ -106,9 +107,9 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
   public static final String NMSS_REG_RESPONSE_KEY =
       NMSS_CLASS_PREFIX + "registerResponse";
 
-  /*
+  /**
    * When AMRMProxy HA is enabled, secondary AMRMTokens will be stored in Yarn
-   * Registry. Otherwise if NM recovery is enabled, the UAM token are store in
+   * Registry. Otherwise, if NM recovery is enabled, the UAM tokens are stored in
    * local NMSS instead under this directory name.
    */
   public static final String NMSS_SECONDARY_SC_PREFIX =
@@ -119,8 +120,23 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
    * The home sub-cluster is the sub-cluster where the AM container is running
    * in.
    */
-  private ApplicationMasterProtocol homeRM;
+  private AMRMClientRelayer homeRMRelayer;
   private SubClusterId homeSubClusterId;
+  private volatile int lastHomeResponseId;
+
+  /**
+   * A flag for work preserving NM restart. If we just recovered, we need to
+   * generate an {@link ApplicationMasterNotRegisteredException} exception back
+   * to AM (similar to what RM will do after its restart/fail-over) in its next
+   * allocate to trigger AM re-register (which we will shield from RM and just
+   * return our saved register response) and a full pending requests re-send, so
+   * that all the {@link AMRMClientRelayer} will be re-populated with all
+   * pending requests.
+   *
+   * TODO: When split-merge is not idempotent, this can lead to some
+   * over-allocation without a full cancel to RM.
+   */
+  private volatile boolean justRecovered;
 
   /**
    * UAM pool for secondary sub-clusters (ones other than home sub-cluster),
@@ -134,6 +150,12 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
    */
   private UnmanagedAMPoolManager uamPool;
 
+  /**
+   * The rmProxy relayers for secondary sub-clusters that keep track of all
+   * pending requests.
+   */
+  private Map<String, AMRMClientRelayer> secondaryRelayers;
+
   /** Thread pool used for asynchronous operations. */
   private ExecutorService threadpool;
 
@@ -186,8 +208,11 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
     this.asyncResponseSink = new ConcurrentHashMap<>();
     this.threadpool = Executors.newCachedThreadPool();
     this.uamPool = createUnmanagedAMPoolManager(this.threadpool);
+    this.secondaryRelayers = new ConcurrentHashMap<>();
     this.amRegistrationRequest = null;
     this.amRegistrationResponse = null;
+    this.lastHomeResponseId = Integer.MAX_VALUE;
+    this.justRecovered = false;
   }
 
   /**
@@ -224,8 +249,8 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
 
     this.homeSubClusterId =
         SubClusterId.newInstance(YarnConfiguration.getClusterId(conf));
-    this.homeRM = createHomeRMProxy(appContext, ApplicationMasterProtocol.class,
-        this.appOwner);
+    this.homeRMRelayer = new AMRMClientRelayer(createHomeRMProxy(appContext,
+        ApplicationMasterProtocol.class, this.appOwner));
 
     this.federationFacade = FederationStateStoreFacade.getInstance();
     this.subClusterResolver = this.federationFacade.getSubClusterResolver();
@@ -240,13 +265,12 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
   @Override
   public void recover(Map<String, byte[]> recoveredDataMap) {
     super.recover(recoveredDataMap);
-    LOG.info("Recovering data for FederationInterceptor");
+    ApplicationAttemptId attemptId =
+        getApplicationContext().getApplicationAttemptId();
+    LOG.info("Recovering data for FederationInterceptor for {}", attemptId);
     if (recoveredDataMap == null) {
       return;
     }
-
-    ApplicationAttemptId attemptId =
-        getApplicationContext().getApplicationAttemptId();
     try {
       if (recoveredDataMap.containsKey(NMSS_REG_REQUEST_KEY)) {
         RegisterApplicationMasterRequestProto pb =
@@ -255,6 +279,9 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
         this.amRegistrationRequest =
             new RegisterApplicationMasterRequestPBImpl(pb);
         LOG.info("amRegistrationRequest recovered for {}", attemptId);
+
+        // Give the register request to homeRMRelayer for future re-registration
+        this.homeRMRelayer.setAMRegistrationRequest(this.amRegistrationRequest);
       }
       if (recoveredDataMap.containsKey(NMSS_REG_RESPONSE_KEY)) {
         RegisterApplicationMasterResponseProto pb =
@@ -263,6 +290,9 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
         this.amRegistrationResponse =
             new RegisterApplicationMasterResponsePBImpl(pb);
         LOG.info("amRegistrationResponse recovered for {}", attemptId);
+        // Trigger re-register and full pending re-send only if we have a
+        // saved register response. This should always be true though.
+        this.justRecovered = true;
       }
 
       // Recover UAM amrmTokens from registry or NMSS
@@ -309,6 +339,9 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
               getApplicationContext().getUser(), this.homeSubClusterId.getId(),
               entry.getValue());
 
+          this.secondaryRelayers.put(subClusterId.getId(),
+              this.uamPool.getAMRMClientRelayer(subClusterId.getId()));
+
           RegisterApplicationMasterResponse response =
               this.uamPool.registerApplicationMaster(subClusterId.getId(),
                   this.amRegistrationRequest);
@@ -436,7 +469,7 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
      * the other sub-cluster RM will be done lazily as needed later.
      */
     this.amRegistrationResponse =
-        this.homeRM.registerApplicationMaster(request);
+        this.homeRMRelayer.registerApplicationMaster(request);
     if (this.amRegistrationResponse
         .getContainersFromPreviousAttempts() != null) {
       cacheAllocatedContainers(
@@ -495,6 +528,34 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
     Preconditions.checkArgument(this.policyInterpreter != null,
         "Allocate should be called after registerApplicationMaster");
 
+    if (this.justRecovered && this.lastHomeResponseId == Integer.MAX_VALUE) {
+      // Save the responseId home RM is expecting
+      this.lastHomeResponseId = request.getResponseId();
+
+      throw new ApplicationMasterNotRegisteredException(
+          "AMRMProxy just restarted and recovered for "
+              + getApplicationContext().getApplicationAttemptId()
+              + ". AM should re-register and full re-send pending requests.");
+    }
+
+    // Override responseId in the request in two cases:
+    //
+    // 1. After we just recovered after an NM restart and AM's responseId is
+    // reset due to the exception we generate. We need to override the
+    // responseId to the one homeRM expects.
+    //
+    // 2. After homeRM fail-over, the allocate response with reset responseId
+    // might not be returned successfully back to AM because of RPC connection
+    // timeout between AM and AMRMProxy. In this case, we remember and reset the
+    // responseId for AM.
+    if (this.justRecovered
+        || request.getResponseId() > this.lastHomeResponseId) {
+      LOG.warn("Setting allocate responseId for {} from {} to {}",
+          getApplicationContext().getApplicationAttemptId(),
+          request.getResponseId(), this.lastHomeResponseId);
+      request.setResponseId(this.lastHomeResponseId);
+    }
+
     try {
       // Split the heart beat request into multiple requests, one for each
       // sub-cluster RM that is used by this application.
@@ -509,10 +570,18 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
           sendRequestsToSecondaryResourceManagers(requests);
 
       // Send the request to the home RM and get the response
-      AllocateResponse homeResponse = AMRMClientUtils.allocateWithReRegister(
-          requests.get(this.homeSubClusterId), this.homeRM,
-          this.amRegistrationRequest,
-          getApplicationContext().getApplicationAttemptId().getApplicationId());
+      AllocateRequest homeRequest = requests.get(this.homeSubClusterId);
+      LOG.info("{} heartbeating to home RM with responseId {}",
+          getApplicationContext().getApplicationAttemptId(),
+          homeRequest.getResponseId());
+
+      AllocateResponse homeResponse = this.homeRMRelayer.allocate(homeRequest);
+
+      // Reset the flag after the first successful homeRM allocate response,
+      // otherwise keep overriding the responseId of new allocate request
+      if (this.justRecovered) {
+        this.justRecovered = false;
+      }
 
       // Notify policy of home response
       try {
@@ -540,6 +609,22 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
             newRegistrations.getSuccessfulRegistrations());
       }
 
+      LOG.info("{} heartbeat response from home RM with responseId {}",
+          getApplicationContext().getApplicationAttemptId(),
+          homeResponse.getResponseId());
+
+      // Update lastHomeResponseId in three cases:
+      // 1. The normal responseId increments
+      // 2. homeResponse.getResponseId() == 1. This happens when homeRM fails
+      // over, AMRMClientRelayer auto re-register and full re-send for homeRM.
+      // 3. lastHomeResponseId == MAX_INT. This is the initial case or
+      // responseId about to overflow and wrap around
+      if (homeResponse.getResponseId() == this.lastHomeResponseId + 1
+          || homeResponse.getResponseId() == 1
+          || this.lastHomeResponseId == Integer.MAX_VALUE) {
+        this.lastHomeResponseId = homeResponse.getResponseId();
+      }
+
       // return the final response to the application master.
       return homeResponse;
     } catch (IOException ex) {
@@ -584,6 +669,16 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
             try {
               uamResponse =
                   uamPool.finishApplicationMaster(subClusterId, finishRequest);
+
+              if (uamResponse.getIsUnregistered()) {
+                secondaryRelayers.remove(subClusterId);
+
+                if (getNMStateStore() != null) {
+                  getNMStateStore().removeAMRMProxyAppContextEntry(
+                      getApplicationContext().getApplicationAttemptId(),
+                      NMSS_SECONDARY_SC_PREFIX + subClusterId);
+                }
+              }
             } catch (Throwable e) {
               LOG.warn("Failed to finish unmanaged application master: "
                   + "RM address: " + subClusterId + " ApplicationId: "
@@ -600,9 +695,7 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
     // asynchronously by other sub-cluster resource managers, send the same
     // request to the home resource manager on this thread.
     FinishApplicationMasterResponse homeResponse =
-        AMRMClientUtils.finishAMWithReRegister(request, this.homeRM,
-            this.amRegistrationRequest, getApplicationContext()
-                .getApplicationAttemptId().getApplicationId());
+        this.homeRMRelayer.finishApplicationMaster(request);
 
     if (subClusterIds.size() > 0) {
       // Wait for other sub-cluster resource managers to return the
@@ -621,10 +714,6 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
           if (uamResponse.getResponse() == null
               || !uamResponse.getResponse().getIsUnregistered()) {
             failedToUnRegister = true;
-          } else if (getNMStateStore() != null) {
-            getNMStateStore().removeAMRMProxyAppContextEntry(
-                getApplicationContext().getApplicationAttemptId(),
-                NMSS_SECONDARY_SC_PREFIX + uamResponse.getSubClusterId());
           }
         } catch (Throwable e) {
           failedToUnRegister = true;
@@ -689,6 +778,11 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
     return this.registryClient;
   }
 
+  @VisibleForTesting
+  protected int getLastHomeResponseId() {
+    return this.lastHomeResponseId;
+  }
+
   /**
    * Create the UAM pool manager for secondary sub-clsuters. For unit test to
    * override.
@@ -800,6 +894,9 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
                     getApplicationContext().getUser(), homeSubClusterId.getId(),
                     amrmToken);
 
+                secondaryRelayers.put(subClusterId.getId(),
+                    uamPool.getAMRMClientRelayer(subClusterId.getId()));
+
                 response = uamPool.registerApplicationMaster(
                     subClusterId.getId(), amRegistrationRequest);
 
@@ -1098,7 +1195,10 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
                   token = uamPool.launchUAM(subClusterId, config,
                       appContext.getApplicationAttemptId().getApplicationId(),
                       amRegistrationResponse.getQueue(), appContext.getUser(),
-                      homeSubClusterId.toString(), registryClient != null);
+                      homeSubClusterId.toString(), true);
+
+                  secondaryRelayers.put(subClusterId,
+                      uamPool.getAMRMClientRelayer(subClusterId));
 
                   uamResponse = uamPool.registerApplicationMaster(subClusterId,
                       registerRequest);
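
A condensed sketch of the recovery handshake added in the FederationInterceptor hunks above, assuming the same justRecovered/lastHomeResponseId fields; homeRM stands in for the home RM relayer, and the extra bookkeeping for home RM fail-over and responseId wrap-around is omitted:

import java.io.IOException;

import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
import org.apache.hadoop.yarn.exceptions.YarnException;

/** Condensed sketch of the post-recovery allocate handshake (assumed names). */
class RecoveryHandshakeSketch {
  private final ApplicationMasterProtocol homeRM; // stand-in for the home RM relayer
  private volatile boolean justRecovered = true;  // set when a register response was recovered
  private volatile int lastHomeResponseId = Integer.MAX_VALUE;

  RecoveryHandshakeSketch(ApplicationMasterProtocol homeRM) {
    this.homeRM = homeRM;
  }

  AllocateResponse allocate(AllocateRequest request)
      throws YarnException, IOException {
    if (justRecovered && lastHomeResponseId == Integer.MAX_VALUE) {
      // First heartbeat after the AMRMProxy restart: remember the responseId
      // the home RM expects, then force the AM to re-register and re-send
      // all of its pending requests.
      lastHomeResponseId = request.getResponseId();
      throw new ApplicationMasterNotRegisteredException(
          "AMRMProxy just restarted and recovered; AM should re-register.");
    }
    if (justRecovered || request.getResponseId() > lastHomeResponseId) {
      // The AM's responseId was reset by the exception above, or a response
      // was lost in transit; use the value the home RM is actually expecting.
      request.setResponseId(lastHomeResponseId);
    }
    AllocateResponse response = homeRM.allocate(request);
    justRecovered = false;                          // stop overriding after first success
    lastHomeResponseId = response.getResponseId();  // simplified bookkeeping
    return response;
  }
}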

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea9b6082/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
index 677732d..2794857 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
@@ -536,6 +537,7 @@ public abstract class BaseAMRMProxyTest {
     capability.setMemorySize(memory);
     capability.setVirtualCores(vCores);
     req.setCapability(capability);
+    req.setExecutionTypeRequest(ExecutionTypeRequest.newInstance());
     if (labelExpression != null) {
       req.setNodeLabelExpression(labelExpression);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea9b6082/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestFederationInterceptor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestFederationInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestFederationInterceptor.java
index eefaba1..a837eed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestFederationInterceptor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestFederationInterceptor.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
 import org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.MockResourceManagerFacade;
@@ -516,6 +517,22 @@ public class TestFederationInterceptor extends BaseAMRMProxyTest {
         interceptor.recover(recoveredDataMap);
 
         Assert.assertEquals(1, interceptor.getUnmanagedAMPoolSize());
+        Assert.assertEquals(Integer.MAX_VALUE,
+            interceptor.getLastHomeResponseId());
+
+        // The first allocate call expects a fail-over exception and re-register
+        int responseId = 10;
+        AllocateRequest allocateRequest =
+            Records.newRecord(AllocateRequest.class);
+        allocateRequest.setResponseId(responseId);
+        try {
+          interceptor.allocate(allocateRequest);
+          Assert.fail("Expecting an ApplicationMasterNotRegisteredException  "
+              + " after FederationInterceptor restarts and recovers");
+        } catch (ApplicationMasterNotRegisteredException e) {
+        }
+        interceptor.registerApplicationMaster(registerReq);
+        Assert.assertEquals(responseId, interceptor.getLastHomeResponseId());
 
         // Release all containers
         releaseContainersAndAssert(containers);




[20/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java
deleted file mode 100644
index 15c3fd3..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.LinkedList;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * This class tests the versioning of blocks from KSM side.
- */
-public class TestKsmBlockVersioning {
-  private static MiniOzoneCluster cluster = null;
-  private static UserArgs userArgs;
-  private static OzoneConfiguration conf;
-  private static KeySpaceManager keySpaceManager;
-  private static StorageHandler storageHandler;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    keySpaceManager = cluster.getKeySpaceManager();
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testAllocateCommit() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setDataSize(1000)
-        .build();
-
-    // 1st update, version 0
-    OpenKeySession openKey = keySpaceManager.openKey(keyArgs);
-    keySpaceManager.commitKey(keyArgs, openKey.getId());
-
-    KsmKeyInfo keyInfo = keySpaceManager.lookupKey(keyArgs);
-    KsmKeyLocationInfoGroup highestVersion =
-        checkVersions(keyInfo.getKeyLocationVersions());
-    assertEquals(0, highestVersion.getVersion());
-    assertEquals(1, highestVersion.getLocationList().size());
-
-    // 2nd update, version 1
-    openKey = keySpaceManager.openKey(keyArgs);
-    //KsmKeyLocationInfo locationInfo =
-    //    keySpaceManager.allocateBlock(keyArgs, openKey.getId());
-    keySpaceManager.commitKey(keyArgs, openKey.getId());
-
-    keyInfo = keySpaceManager.lookupKey(keyArgs);
-    highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
-    assertEquals(1, highestVersion.getVersion());
-    assertEquals(2, highestVersion.getLocationList().size());
-
-    // 3rd update, version 2
-    openKey = keySpaceManager.openKey(keyArgs);
-    // this block will be appended to the latest version of version 2.
-    keySpaceManager.allocateBlock(keyArgs, openKey.getId());
-    keySpaceManager.commitKey(keyArgs, openKey.getId());
-
-    keyInfo = keySpaceManager.lookupKey(keyArgs);
-    highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
-    assertEquals(2, highestVersion.getVersion());
-    assertEquals(4, highestVersion.getLocationList().size());
-  }
-
-  private KsmKeyLocationInfoGroup checkVersions(
-      List<KsmKeyLocationInfoGroup> versions) {
-    KsmKeyLocationInfoGroup currentVersion = null;
-    for (KsmKeyLocationInfoGroup version : versions) {
-      if (currentVersion != null) {
-        assertEquals(currentVersion.getVersion() + 1, version.getVersion());
-        for (KsmKeyLocationInfo info : currentVersion.getLocationList()) {
-          boolean found = false;
-          // all the blocks from the previous version must present in the next
-          // version
-          for (KsmKeyLocationInfo info2 : version.getLocationList()) {
-            if (info.getLocalID() == info2.getLocalID()) {
-              found = true;
-              break;
-            }
-          }
-          assertTrue(found);
-        }
-      }
-      currentVersion = version;
-    }
-    return currentVersion;
-  }
-
-  @Test
-  public void testReadLatestVersion() throws Exception {
-
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setDataSize(1000)
-        .build();
-
-    String dataString = RandomStringUtils.randomAlphabetic(100);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    // this write will create 1st version with one block
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    byte[] data = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
-    KsmKeyInfo keyInfo = keySpaceManager.lookupKey(ksmKeyArgs);
-    assertEquals(dataString, DFSUtil.bytes2String(data));
-    assertEquals(0, keyInfo.getLatestVersionLocations().getVersion());
-    assertEquals(1,
-        keyInfo.getLatestVersionLocations().getLocationList().size());
-
-    // this write will create 2nd version, 2nd version will contain block from
-    // version 1, and add a new block
-    dataString = RandomStringUtils.randomAlphabetic(10);
-    data = new byte[dataString.length()];
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
-    keyInfo = keySpaceManager.lookupKey(ksmKeyArgs);
-    assertEquals(dataString, DFSUtil.bytes2String(data));
-    assertEquals(1, keyInfo.getLatestVersionLocations().getVersion());
-    assertEquals(2,
-        keyInfo.getLatestVersionLocations().getLocationList().size());
-
-    dataString = RandomStringUtils.randomAlphabetic(200);
-    data = new byte[dataString.length()];
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
-    keyInfo = keySpaceManager.lookupKey(ksmKeyArgs);
-    assertEquals(dataString, DFSUtil.bytes2String(data));
-    assertEquals(2, keyInfo.getLatestVersionLocations().getVersion());
-    assertEquals(3,
-        keyInfo.getLatestVersionLocations().getLocationList().size());
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java
deleted file mode 100644
index 1cb6e82..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.LinkedList;
-
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test key write/read where a key can span multiple containers.
- */
-public class TestMultipleContainerReadWrite {
-  private static MiniOzoneCluster cluster = null;
-  private static StorageHandler storageHandler;
-  private static UserArgs userArgs;
-  private static OzoneConfiguration conf;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    // set to as small as 100 bytes per block.
-    conf.setLong(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB, 1);
-    conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, 5);
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testWriteRead() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString = RandomStringUtils.randomAscii(3 * (int)OzoneConsts.MB);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(3 * (int)OzoneConsts.MB);
-
-    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
-      outputStream.write(dataString.getBytes());
-    }
-
-    byte[] data = new byte[dataString.length()];
-    try (InputStream inputStream = storageHandler.newKeyReader(keyArgs)) {
-      inputStream.read(data, 0, data.length);
-    }
-    assertEquals(dataString, new String(data));
-    // checking whether container meta data has the chunk file persisted.
-    MetricsRecordBuilder containerMetrics = getMetrics(
-        "StorageContainerMetrics");
-    assertCounter("numWriteChunk", 3L, containerMetrics);
-    assertCounter("numReadChunk", 3L, containerMetrics);
-  }
-
-  // Disable this test, because this tests assumes writing beyond a specific
-  // size is not allowed. Which is not true for now. Keeping this test in case
-  // we add this restrict in the future.
-  @Ignore
-  @Test
-  public void testErrorWrite() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString1 = RandomStringUtils.randomAscii(100);
-    String dataString2 = RandomStringUtils.randomAscii(500);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(500);
-
-    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
-      // first write will write succeed
-      outputStream.write(dataString1.getBytes());
-      // second write
-      exception.expect(IOException.class);
-      exception.expectMessage(
-          "Can not write 500 bytes with only 400 byte space");
-      outputStream.write(dataString2.getBytes());
-    }
-  }
-
-  @Test
-  public void testPartialRead() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString = RandomStringUtils.randomAscii(500);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(500);
-
-    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
-      outputStream.write(dataString.getBytes());
-    }
-
-    byte[] data = new byte[600];
-    try (InputStream inputStream = storageHandler.newKeyReader(keyArgs)) {
-      int readLen = inputStream.read(data, 0, 340);
-      assertEquals(340, readLen);
-      assertEquals(dataString.substring(0, 340),
-          new String(data).substring(0, 340));
-
-      readLen = inputStream.read(data, 340, 260);
-      assertEquals(160, readLen);
-      assertEquals(dataString, new String(data).substring(0, 500));
-
-      readLen = inputStream.read(data, 500, 1);
-      assertEquals(-1, readLen);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
new file mode 100644
index 0000000..5481506
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.client.*;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
+import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+/**
+ * This class tests container report with DN container state info.
+ */
+public class TestContainerReportWithKeys {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      TestContainerReportWithKeys.class);
+  private static MiniOzoneCluster cluster = null;
+  private static OzoneConfiguration conf;
+  private static StorageContainerManager scm;
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
+    scm = cluster.getStorageContainerManager();
+  }
+
+  /**
+   * Shutdown MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testContainerReportKeyWrite() throws Exception {
+    final String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    final String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    final String keyName = "key" + RandomStringUtils.randomNumeric(5);
+    final int keySize = 100;
+
+    OzoneClient client = OzoneClientFactory.getClient(conf);
+    ObjectStore objectStore = client.getObjectStore();
+    objectStore.createVolume(volumeName);
+    objectStore.getVolume(volumeName).createBucket(bucketName);
+    OzoneOutputStream key =
+        objectStore.getVolume(volumeName).getBucket(bucketName)
+            .createKey(keyName, keySize, ReplicationType.STAND_ALONE,
+                ReplicationFactor.ONE);
+    String dataString = RandomStringUtils.randomAlphabetic(keySize);
+    key.write(dataString.getBytes());
+    key.close();
+
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setType(HddsProtos.ReplicationType.STAND_ALONE)
+        .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(keySize)
+        .build();
+
+
+    OmKeyLocationInfo keyInfo =
+        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
+            .get(0).getBlocksLatestVersionOnly().get(0);
+
+    ContainerData cd = getContainerData(keyInfo.getContainerID());
+
+    LOG.info("DN Container Data:  keyCount: {} used: {} ",
+        cd.getKeyCount(), cd.getBytesUsed());
+
+    ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID());
+
+    LOG.info("SCM Container Info keyCount: {} usedBytes: {}",
+        cinfo.getNumberOfKeys(), cinfo.getUsedBytes());
+  }
+
+
+  private static ContainerData getContainerData(long containerID) {
+    ContainerData containerData;
+    try {
+      ContainerManager containerManager = cluster.getHddsDatanodes().get(0)
+          .getDatanodeStateMachine().getContainer().getContainerManager();
+      containerData = containerManager.readContainer(containerID);
+    } catch (StorageContainerException e) {
+      throw new AssertionError(e);
+    }
+    return containerData;
+  }
+}
\ No newline at end of file
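
For reference, the new test above drives the full key write path through the
public OzoneClient API and then compares the datanode's ContainerData with the
SCM's ContainerInfo for the key's container. A minimal sketch of the
client-side portion, assuming a running MiniOzoneCluster and the same API the
test uses (volume, bucket and key names here are illustrative):

    // fragment only; assumes the imports of TestContainerReportWithKeys and a
    // surrounding method that declares "throws Exception"
    OzoneConfiguration conf = new OzoneConfiguration();
    OzoneClient client = OzoneClientFactory.getClient(conf);
    ObjectStore store = client.getObjectStore();

    // create the volume/bucket hierarchy that will hold the key
    store.createVolume("vol1");
    store.getVolume("vol1").createBucket("bucket1");

    // write a small key with stand-alone replication, factor one
    byte[] payload = "hello ozone".getBytes();
    try (OzoneOutputStream out = store.getVolume("vol1").getBucket("bucket1")
        .createKey("key1", payload.length, ReplicationType.STAND_ALONE,
            ReplicationFactor.ONE)) {
      out.write(payload);
    }

The test then resolves the key's container via OzoneManager#lookupKey and logs
the datanode (ContainerData) and SCM (ContainerInfo) views of that container.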

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java
new file mode 100644
index 0000000..1389cba
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
+import org.apache.hadoop.ozone.web.handlers.KeyArgs;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.LinkedList;
+
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test key write/read where a key can span multiple containers.
+ */
+public class TestMultipleContainerReadWrite {
+  private static MiniOzoneCluster cluster = null;
+  private static StorageHandler storageHandler;
+  private static UserArgs userArgs;
+  private static OzoneConfiguration conf;
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    // use a small (1 MB) block size so keys span multiple blocks/containers.
+    conf.setLong(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB, 1);
+    conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, 5);
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
+    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
+    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+        null, null, null, null);
+  }
+
+  /**
+   * Shutdown MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testWriteRead() throws Exception {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    String dataString = RandomStringUtils.randomAscii(3 * (int)OzoneConsts.MB);
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    keyArgs.setSize(3 * (int)OzoneConsts.MB);
+
+    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
+      outputStream.write(dataString.getBytes());
+    }
+
+    byte[] data = new byte[dataString.length()];
+    try (InputStream inputStream = storageHandler.newKeyReader(keyArgs)) {
+      inputStream.read(data, 0, data.length);
+    }
+    assertEquals(dataString, new String(data));
+    // check via container metrics that the chunk files were written and read.
+    MetricsRecordBuilder containerMetrics = getMetrics(
+        "StorageContainerMetrics");
+    assertCounter("numWriteChunk", 3L, containerMetrics);
+    assertCounter("numReadChunk", 3L, containerMetrics);
+  }
+
+  // This test is disabled because it assumes that writing beyond a specified
+  // size is not allowed, which is not currently the case. It is kept in case
+  // such a restriction is added in the future.
+  @Ignore
+  @Test
+  public void testErrorWrite() throws Exception {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    String dataString1 = RandomStringUtils.randomAscii(100);
+    String dataString2 = RandomStringUtils.randomAscii(500);
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    keyArgs.setSize(500);
+
+    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
+      // the first write should succeed
+      outputStream.write(dataString1.getBytes());
+      // the second write should fail because it exceeds the remaining space
+      exception.expect(IOException.class);
+      exception.expectMessage(
+          "Can not write 500 bytes with only 400 byte space");
+      outputStream.write(dataString2.getBytes());
+    }
+  }
+
+  @Test
+  public void testPartialRead() throws Exception {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    String dataString = RandomStringUtils.randomAscii(500);
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    keyArgs.setSize(500);
+
+    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
+      outputStream.write(dataString.getBytes());
+    }
+
+    byte[] data = new byte[600];
+    try (InputStream inputStream = storageHandler.newKeyReader(keyArgs)) {
+      int readLen = inputStream.read(data, 0, 340);
+      assertEquals(340, readLen);
+      assertEquals(dataString.substring(0, 340),
+          new String(data).substring(0, 340));
+
+      readLen = inputStream.read(data, 340, 260);
+      assertEquals(160, readLen);
+      assertEquals(dataString, new String(data).substring(0, 500));
+
+      readLen = inputStream.read(data, 500, 1);
+      assertEquals(-1, readLen);
+    }
+  }
+}
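
testPartialRead above relies on the standard java.io.InputStream contract:
read(buf, off, len) may return fewer bytes than requested (here 340, then 160)
and returns -1 only after the whole 500-byte key has been consumed. A hedged
sketch of a loop that fills a buffer completely under that contract, reusing
the storageHandler and keyArgs from the test:

    byte[] buf = new byte[500];
    int off = 0;
    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
      int n;
      // keep reading until the buffer is full or the stream reports end-of-data
      while (off < buf.length
          && (n = in.read(buf, off, buf.length - off)) != -1) {
        off += n;
      }
    }
    assertEquals(buf.length, off);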

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
new file mode 100644
index 0000000..15122b9
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
@@ -0,0 +1,253 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
+import org.apache.hadoop.ozone.web.handlers.KeyArgs;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.LinkedList;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This class tests the versioning of blocks from the OM side.
+ */
+public class TestOmBlockVersioning {
+  private static MiniOzoneCluster cluster = null;
+  private static UserArgs userArgs;
+  private static OzoneConfiguration conf;
+  private static OzoneManager ozoneManager;
+  private static StorageHandler storageHandler;
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
+    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
+    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+        null, null, null, null);
+    ozoneManager = cluster.getOzoneManager();
+  }
+
+  /**
+   * Shutdown MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testAllocateCommit() throws Exception {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setDataSize(1000)
+        .build();
+
+    // 1st update, version 0
+    OpenKeySession openKey = ozoneManager.openKey(keyArgs);
+    ozoneManager.commitKey(keyArgs, openKey.getId());
+
+    OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs);
+    OmKeyLocationInfoGroup highestVersion =
+        checkVersions(keyInfo.getKeyLocationVersions());
+    assertEquals(0, highestVersion.getVersion());
+    assertEquals(1, highestVersion.getLocationList().size());
+
+    // 2nd update, version 1
+    openKey = ozoneManager.openKey(keyArgs);
+    //OmKeyLocationInfo locationInfo =
+    //    ozoneManager.allocateBlock(keyArgs, openKey.getId());
+    ozoneManager.commitKey(keyArgs, openKey.getId());
+
+    keyInfo = ozoneManager.lookupKey(keyArgs);
+    highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
+    assertEquals(1, highestVersion.getVersion());
+    assertEquals(2, highestVersion.getLocationList().size());
+
+    // 3rd update, version 2
+    openKey = ozoneManager.openKey(keyArgs);
+    // this block will be appended to the latest version of version 2.
+    ozoneManager.allocateBlock(keyArgs, openKey.getId());
+    ozoneManager.commitKey(keyArgs, openKey.getId());
+
+    keyInfo = ozoneManager.lookupKey(keyArgs);
+    highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
+    assertEquals(2, highestVersion.getVersion());
+    assertEquals(4, highestVersion.getLocationList().size());
+  }
+
+  private OmKeyLocationInfoGroup checkVersions(
+      List<OmKeyLocationInfoGroup> versions) {
+    OmKeyLocationInfoGroup currentVersion = null;
+    for (OmKeyLocationInfoGroup version : versions) {
+      if (currentVersion != null) {
+        assertEquals(currentVersion.getVersion() + 1, version.getVersion());
+        for (OmKeyLocationInfo info : currentVersion.getLocationList()) {
+          boolean found = false;
+          // all blocks from the previous version must be present in the next
+          // version
+          for (OmKeyLocationInfo info2 : version.getLocationList()) {
+            if (info.getLocalID() == info2.getLocalID()) {
+              found = true;
+              break;
+            }
+          }
+          assertTrue(found);
+        }
+      }
+      currentVersion = version;
+    }
+    return currentVersion;
+  }
+
+  @Test
+  public void testReadLatestVersion() throws Exception {
+
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setDataSize(1000)
+        .build();
+
+    String dataString = RandomStringUtils.randomAlphabetic(100);
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    // this write will create 1st version with one block
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+    byte[] data = new byte[dataString.length()];
+    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
+      in.read(data);
+    }
+    OmKeyInfo keyInfo = ozoneManager.lookupKey(omKeyArgs);
+    assertEquals(dataString, DFSUtil.bytes2String(data));
+    assertEquals(0, keyInfo.getLatestVersionLocations().getVersion());
+    assertEquals(1,
+        keyInfo.getLatestVersionLocations().getLocationList().size());
+
+    // this write will create 2nd version, 2nd version will contain block from
+    // version 1, and add a new block
+    dataString = RandomStringUtils.randomAlphabetic(10);
+    data = new byte[dataString.length()];
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
+      in.read(data);
+    }
+    keyInfo = ozoneManager.lookupKey(omKeyArgs);
+    assertEquals(dataString, DFSUtil.bytes2String(data));
+    assertEquals(1, keyInfo.getLatestVersionLocations().getVersion());
+    assertEquals(2,
+        keyInfo.getLatestVersionLocations().getLocationList().size());
+
+    dataString = RandomStringUtils.randomAlphabetic(200);
+    data = new byte[dataString.length()];
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
+      in.read(data);
+    }
+    keyInfo = ozoneManager.lookupKey(omKeyArgs);
+    assertEquals(dataString, DFSUtil.bytes2String(data));
+    assertEquals(2, keyInfo.getLatestVersionLocations().getVersion());
+    assertEquals(3,
+        keyInfo.getLatestVersionLocations().getLocationList().size());
+  }
+}
\ No newline at end of file
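
The block counts asserted in testAllocateCommit above encode the versioning
contract under test: openKey pre-allocates one block for the new version,
allocateBlock appends one more, and every new version carries forward all
blocks of the previous version. A hedged restatement of that progression and
of the invariant enforced by checkVersions, reusing keyInfo from the test:

    // version 0: one block pre-allocated by openKey                     -> 1 block
    // version 1: 1 block carried over + 1 pre-allocated by openKey      -> 2 blocks
    // version 2: 2 carried over + 1 from openKey + 1 from allocateBlock -> 4 blocks
    List<OmKeyLocationInfoGroup> versions = keyInfo.getKeyLocationVersions();
    for (int i = 1; i < versions.size(); i++) {
      // version numbers must be consecutive, as checkVersions() asserts
      assertEquals(versions.get(i - 1).getVersion() + 1,
          versions.get(i).getVersion());
    }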

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
new file mode 100644
index 0000000..8d0f4b21
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
@@ -0,0 +1,313 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+
+import java.io.IOException;
+
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+/**
+ * Test for OM metrics.
+ */
+@SuppressWarnings("deprecation")
+public class TestOmMetrics {
+  private MiniOzoneCluster cluster;
+  private OzoneManager ozoneManager;
+
+  /**
+   * The exception used for testing failure metrics.
+   */
+  private IOException exception = new IOException();
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   *
+   * @throws IOException
+   */
+  @Before
+  public void setup() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
+    ozoneManager = cluster.getOzoneManager();
+  }
+
+  /**
+   * Shutdown MiniOzoneCluster.
+   */
+  @After
+  public void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testVolumeOps() throws IOException {
+    VolumeManager volumeManager =
+        (VolumeManager) org.apache.hadoop.test.Whitebox
+            .getInternalState(ozoneManager, "volumeManager");
+    VolumeManager mockVm = Mockito.spy(volumeManager);
+
+    Mockito.doNothing().when(mockVm).createVolume(null);
+    Mockito.doNothing().when(mockVm).deleteVolume(null);
+    Mockito.doReturn(null).when(mockVm).getVolumeInfo(null);
+    Mockito.doReturn(true).when(mockVm).checkVolumeAccess(null, null);
+    Mockito.doNothing().when(mockVm).setOwner(null, null);
+    Mockito.doReturn(null).when(mockVm).listVolumes(null, null, null, 0);
+
+    org.apache.hadoop.test.Whitebox.setInternalState(
+        ozoneManager, "volumeManager", mockVm);
+    doVolumeOps();
+
+    MetricsRecordBuilder omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumVolumeOps", 6L, omMetrics);
+    assertCounter("NumVolumeCreates", 1L, omMetrics);
+    assertCounter("NumVolumeUpdates", 1L, omMetrics);
+    assertCounter("NumVolumeInfos", 1L, omMetrics);
+    assertCounter("NumVolumeCheckAccesses", 1L, omMetrics);
+    assertCounter("NumVolumeDeletes", 1L, omMetrics);
+    assertCounter("NumVolumeLists", 1L, omMetrics);
+
+    // inject exception to test for Failure Metrics
+    Mockito.doThrow(exception).when(mockVm).createVolume(null);
+    Mockito.doThrow(exception).when(mockVm).deleteVolume(null);
+    Mockito.doThrow(exception).when(mockVm).getVolumeInfo(null);
+    Mockito.doThrow(exception).when(mockVm).checkVolumeAccess(null, null);
+    Mockito.doThrow(exception).when(mockVm).setOwner(null, null);
+    Mockito.doThrow(exception).when(mockVm).listVolumes(null, null, null, 0);
+
+    org.apache.hadoop.test.Whitebox.setInternalState(ozoneManager, "volumeManager", mockVm);
+    doVolumeOps();
+
+    omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumVolumeOps", 12L, omMetrics);
+    assertCounter("NumVolumeCreates", 2L, omMetrics);
+    assertCounter("NumVolumeUpdates", 2L, omMetrics);
+    assertCounter("NumVolumeInfos", 2L, omMetrics);
+    assertCounter("NumVolumeCheckAccesses", 2L, omMetrics);
+    assertCounter("NumVolumeDeletes", 2L, omMetrics);
+    assertCounter("NumVolumeLists", 2L, omMetrics);
+
+    assertCounter("NumVolumeCreateFails", 1L, omMetrics);
+    assertCounter("NumVolumeUpdateFails", 1L, omMetrics);
+    assertCounter("NumVolumeInfoFails", 1L, omMetrics);
+    assertCounter("NumVolumeCheckAccessFails", 1L, omMetrics);
+    assertCounter("NumVolumeDeleteFails", 1L, omMetrics);
+    assertCounter("NumVolumeListFails", 1L, omMetrics);
+  }
+
+  @Test
+  public void testBucketOps() throws IOException {
+    BucketManager bucketManager =
+        (BucketManager) org.apache.hadoop.test.Whitebox
+            .getInternalState(ozoneManager, "bucketManager");
+    BucketManager mockBm = Mockito.spy(bucketManager);
+
+    Mockito.doNothing().when(mockBm).createBucket(null);
+    Mockito.doNothing().when(mockBm).deleteBucket(null, null);
+    Mockito.doReturn(null).when(mockBm).getBucketInfo(null, null);
+    Mockito.doNothing().when(mockBm).setBucketProperty(null);
+    Mockito.doReturn(null).when(mockBm).listBuckets(null, null, null, 0);
+
+    org.apache.hadoop.test.Whitebox.setInternalState(
+        ozoneManager, "bucketManager", mockBm);
+    doBucketOps();
+
+    MetricsRecordBuilder omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumBucketOps", 5L, omMetrics);
+    assertCounter("NumBucketCreates", 1L, omMetrics);
+    assertCounter("NumBucketUpdates", 1L, omMetrics);
+    assertCounter("NumBucketInfos", 1L, omMetrics);
+    assertCounter("NumBucketDeletes", 1L, omMetrics);
+    assertCounter("NumBucketLists", 1L, omMetrics);
+
+    // inject exception to test for Failure Metrics
+    Mockito.doThrow(exception).when(mockBm).createBucket(null);
+    Mockito.doThrow(exception).when(mockBm).deleteBucket(null, null);
+    Mockito.doThrow(exception).when(mockBm).getBucketInfo(null, null);
+    Mockito.doThrow(exception).when(mockBm).setBucketProperty(null);
+    Mockito.doThrow(exception).when(mockBm).listBuckets(null, null, null, 0);
+
+    org.apache.hadoop.test.Whitebox.setInternalState(
+        ozoneManager, "bucketManager", mockBm);
+    doBucketOps();
+
+    omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumBucketOps", 10L, omMetrics);
+    assertCounter("NumBucketCreates", 2L, omMetrics);
+    assertCounter("NumBucketUpdates", 2L, omMetrics);
+    assertCounter("NumBucketInfos", 2L, omMetrics);
+    assertCounter("NumBucketDeletes", 2L, omMetrics);
+    assertCounter("NumBucketLists", 2L, omMetrics);
+
+    assertCounter("NumBucketCreateFails", 1L, omMetrics);
+    assertCounter("NumBucketUpdateFails", 1L, omMetrics);
+    assertCounter("NumBucketInfoFails", 1L, omMetrics);
+    assertCounter("NumBucketDeleteFails", 1L, omMetrics);
+    assertCounter("NumBucketListFails", 1L, omMetrics);
+  }
+
+  @Test
+  public void testKeyOps() throws IOException {
+    KeyManager bucketManager = (KeyManager) org.apache.hadoop.test.Whitebox
+        .getInternalState(ozoneManager, "keyManager");
+    KeyManager mockKm = Mockito.spy(bucketManager);
+
+    Mockito.doReturn(null).when(mockKm).openKey(null);
+    Mockito.doNothing().when(mockKm).deleteKey(null);
+    Mockito.doReturn(null).when(mockKm).lookupKey(null);
+    Mockito.doReturn(null).when(mockKm).listKeys(null, null, null, null, 0);
+
+    org.apache.hadoop.test.Whitebox.setInternalState(
+        ozoneManager, "keyManager", mockKm);
+    doKeyOps();
+
+    MetricsRecordBuilder omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumKeyOps", 4L, omMetrics);
+    assertCounter("NumKeyAllocate", 1L, omMetrics);
+    assertCounter("NumKeyLookup", 1L, omMetrics);
+    assertCounter("NumKeyDeletes", 1L, omMetrics);
+    assertCounter("NumKeyLists", 1L, omMetrics);
+
+    // inject exception to test for Failure Metrics
+    Mockito.doThrow(exception).when(mockKm).openKey(null);
+    Mockito.doThrow(exception).when(mockKm).deleteKey(null);
+    Mockito.doThrow(exception).when(mockKm).lookupKey(null);
+    Mockito.doThrow(exception).when(mockKm).listKeys(
+        null, null, null, null, 0);
+
+    org.apache.hadoop.test.Whitebox.setInternalState(
+        ozoneManager, "keyManager", mockKm);
+    doKeyOps();
+
+    omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumKeyOps", 8L, omMetrics);
+    assertCounter("NumKeyAllocate", 2L, omMetrics);
+    assertCounter("NumKeyLookup", 2L, omMetrics);
+    assertCounter("NumKeyDeletes", 2L, omMetrics);
+    assertCounter("NumKeyLists", 2L, omMetrics);
+
+    assertCounter("NumKeyAllocateFails", 1L, omMetrics);
+    assertCounter("NumKeyLookupFails", 1L, omMetrics);
+    assertCounter("NumKeyDeleteFails", 1L, omMetrics);
+    assertCounter("NumKeyListFails", 1L, omMetrics);
+  }
+
+  /**
+   * Invoke volume operations, ignoring any thrown exceptions.
+   */
+  private void doVolumeOps() {
+    try {
+      ozoneManager.createVolume(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.deleteVolume(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.getVolumeInfo(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.checkVolumeAccess(null, null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.setOwner(null, null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.listAllVolumes(null, null, 0);
+    } catch (IOException ignored) {
+    }
+  }
+
+  /**
+   * Invoke bucket operations, ignoring any thrown exceptions.
+   */
+  private void doBucketOps() {
+    try {
+      ozoneManager.createBucket(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.deleteBucket(null, null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.getBucketInfo(null, null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.setBucketProperty(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.listBuckets(null, null, null, 0);
+    } catch (IOException ignored) {
+    }
+  }
+
+  /**
+   * Invoke key operations, ignoring any thrown exceptions.
+   */
+  private void doKeyOps() {
+    try {
+      ozoneManager.openKey(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.deleteKey(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.lookupKey(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.listKeys(null, null, null, null, 0);
+    } catch (IOException ignored) {
+    }
+  }
+}
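
The assertions above all follow the same MetricsAsserts pattern: invoke an
operation on the OzoneManager (exceptions are swallowed because the underlying
managers are mocked), then read the "OMMetrics" record and check its counters.
A minimal sketch of that pattern for a single volume create, assuming the same
mocked setup as testVolumeOps (counter values are cumulative per OM instance):

    try {
      ozoneManager.createVolume(null);   // VolumeManager is a Mockito spy here
    } catch (IOException ignored) {
    }

    MetricsRecordBuilder omMetrics = getMetrics("OMMetrics");
    // one call bumps both the aggregate counter and the per-operation counter
    assertCounter("NumVolumeOps", 1L, omMetrics);
    assertCounter("NumVolumeCreates", 1L, omMetrics);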

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
new file mode 100644
index 0000000..005a012
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.scm.cli.SQLCLI;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
+import org.apache.hadoop.ozone.web.handlers.KeyArgs;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This class tests the CLI that transforms om.db into SQLite DB files.
+ */
+@RunWith(Parameterized.class)
+public class TestOmSQLCli {
+  private MiniOzoneCluster cluster = null;
+  private StorageHandler storageHandler;
+  private UserArgs userArgs;
+  private OzoneConfiguration conf;
+  private SQLCLI cli;
+
+  private String userName = "userTest";
+  private String adminName = "adminTest";
+  private String volumeName0 = "volumeTest0";
+  private String volumeName1 = "volumeTest1";
+  private String bucketName0 = "bucketTest0";
+  private String bucketName1 = "bucketTest1";
+  private String bucketName2 = "bucketTest2";
+  private String keyName0 = "key0";
+  private String keyName1 = "key1";
+  private String keyName2 = "key2";
+  private String keyName3 = "key3";
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(new Object[][] {
+        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
+        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
+    });
+  }
+
+  private String metaStoreType;
+
+  public TestOmSQLCli(String type) {
+    metaStoreType = type;
+  }
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @Before
+  public void setup() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
+    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
+    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+        null, null, null, null);
+    cluster.waitForClusterToBeReady();
+
+    VolumeArgs createVolumeArgs0 = new VolumeArgs(volumeName0, userArgs);
+    createVolumeArgs0.setUserName(userName);
+    createVolumeArgs0.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs0);
+    VolumeArgs createVolumeArgs1 = new VolumeArgs(volumeName1, userArgs);
+    createVolumeArgs1.setUserName(userName);
+    createVolumeArgs1.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs1);
+
+    BucketArgs bucketArgs0 = new BucketArgs(volumeName0, bucketName0, userArgs);
+    storageHandler.createBucket(bucketArgs0);
+    BucketArgs bucketArgs1 = new BucketArgs(volumeName1, bucketName1, userArgs);
+    storageHandler.createBucket(bucketArgs1);
+    BucketArgs bucketArgs2 = new BucketArgs(volumeName0, bucketName2, userArgs);
+    storageHandler.createBucket(bucketArgs2);
+
+    KeyArgs keyArgs0 =
+        new KeyArgs(volumeName0, bucketName0, keyName0, userArgs);
+    keyArgs0.setSize(100);
+    KeyArgs keyArgs1 =
+        new KeyArgs(volumeName1, bucketName1, keyName1, userArgs);
+    keyArgs1.setSize(200);
+    KeyArgs keyArgs2 =
+        new KeyArgs(volumeName0, bucketName2, keyName2, userArgs);
+    keyArgs2.setSize(300);
+    KeyArgs keyArgs3 =
+        new KeyArgs(volumeName0, bucketName2, keyName3, userArgs);
+    keyArgs3.setSize(400);
+
+    OutputStream stream = storageHandler.newKeyWriter(keyArgs0);
+    stream.close();
+    stream = storageHandler.newKeyWriter(keyArgs1);
+    stream.close();
+    stream = storageHandler.newKeyWriter(keyArgs2);
+    stream.close();
+    stream = storageHandler.newKeyWriter(keyArgs3);
+    stream.close();
+
+    cluster.getOzoneManager().stop();
+    cluster.getStorageContainerManager().stop();
+    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType);
+    cli = new SQLCLI(conf);
+  }
+
+  @After
+  public void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testOmDB() throws Exception {
+    String dbOutPath =  GenericTestUtils.getTempPath(
+        UUID.randomUUID() + "/out_sql.db");
+
+    String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
+    String dbPath = dbRootPath + "/" + OM_DB_NAME;
+    String[] args = {"-p", dbPath, "-o", dbOutPath};
+
+    cli.run(args);
+
+    Connection conn = connectDB(dbOutPath);
+    String sql = "SELECT * FROM volumeList";
+    ResultSet rs = executeQuery(conn, sql);
+    List<String> expectedValues =
+        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
+    while (rs.next()) {
+      String userNameRs = rs.getString("userName");
+      String volumeNameRs = rs.getString("volumeName");
+      assertEquals(userName,  userNameRs.substring(1));
+      assertTrue(expectedValues.remove(volumeNameRs));
+    }
+    assertEquals(0, expectedValues.size());
+
+    sql = "SELECT * FROM volumeInfo";
+    rs = executeQuery(conn, sql);
+    expectedValues =
+        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
+    while (rs.next()) {
+      String adName = rs.getString("adminName");
+      String ownerName = rs.getString("ownerName");
+      String volumeName = rs.getString("volumeName");
+      assertEquals(adminName, adName);
+      assertEquals(userName, ownerName);
+      assertTrue(expectedValues.remove(volumeName));
+    }
+    assertEquals(0, expectedValues.size());
+
+    sql = "SELECT * FROM aclInfo";
+    rs = executeQuery(conn, sql);
+    expectedValues =
+        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
+    while (rs.next()) {
+      String adName = rs.getString("adminName");
+      String ownerName = rs.getString("ownerName");
+      String volumeName = rs.getString("volumeName");
+      String type = rs.getString("type");
+      String uName = rs.getString("userName");
+      String rights = rs.getString("rights");
+      assertEquals(adminName, adName);
+      assertEquals(userName, ownerName);
+      assertEquals("USER", type);
+      assertEquals(userName, uName);
+      assertEquals("READ_WRITE", rights);
+      assertTrue(expectedValues.remove(volumeName));
+    }
+    assertEquals(0, expectedValues.size());
+
+    sql = "SELECT * FROM bucketInfo";
+    rs = executeQuery(conn, sql);
+    HashMap<String, String> expectedMap = new HashMap<>();
+    expectedMap.put(bucketName0, volumeName0);
+    expectedMap.put(bucketName2, volumeName0);
+    expectedMap.put(bucketName1, volumeName1);
+    while (rs.next()) {
+      String volumeName = rs.getString("volumeName");
+      String bucketName = rs.getString("bucketName");
+      boolean versionEnabled = rs.getBoolean("versionEnabled");
+      String storegeType = rs.getString("storageType");
+      assertEquals(volumeName, expectedMap.remove(bucketName));
+      assertFalse(versionEnabled);
+      assertEquals("DISK", storegeType);
+    }
+    assertEquals(0, expectedMap.size());
+
+    sql = "SELECT * FROM keyInfo";
+    rs = executeQuery(conn, sql);
+    HashMap<String, List<String>> expectedMap2 = new HashMap<>();
+    // no data written, data size will be 0
+    expectedMap2.put(keyName0,
+        Arrays.asList(volumeName0, bucketName0, "0"));
+    expectedMap2.put(keyName1,
+        Arrays.asList(volumeName1, bucketName1, "0"));
+    expectedMap2.put(keyName2,
+        Arrays.asList(volumeName0, bucketName2, "0"));
+    expectedMap2.put(keyName3,
+        Arrays.asList(volumeName0, bucketName2, "0"));
+    while (rs.next()) {
+      String volumeName = rs.getString("volumeName");
+      String bucketName = rs.getString("bucketName");
+      String keyName = rs.getString("keyName");
+      int dataSize = rs.getInt("dataSize");
+      List<String> vals = expectedMap2.remove(keyName);
+      assertNotNull(vals);
+      assertEquals(vals.get(0), volumeName);
+      assertEquals(vals.get(1), bucketName);
+      assertEquals(vals.get(2), Integer.toString(dataSize));
+    }
+    assertEquals(0, expectedMap2.size());
+
+    conn.close();
+    Files.delete(Paths.get(dbOutPath));
+  }
+
+  private ResultSet executeQuery(Connection conn, String sql)
+      throws SQLException {
+    Statement stmt = conn.createStatement();
+    return stmt.executeQuery(sql);
+  }
+
+  private Connection connectDB(String dbPath) throws Exception {
+    Class.forName("org.sqlite.JDBC");
+    String connectPath =
+        String.format("jdbc:sqlite:%s", dbPath);
+    return DriverManager.getConnection(connectPath);
+  }
+}
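
Editorial note (not part of the patch): the end-to-end flow the test above exercises can be sketched as follows. The snippet only mirrors calls already shown in the test; omDbPath and dbOutPath are placeholder variables for the OM metadata DB location and the SQLite output file, and the fragment is assumed to run inside a method declared throws Exception with the same imports as TestOmSQLCli.

    // Convert the OM metadata DB into a SQLite file, then inspect it over JDBC
    // (assumes the sqlite-jdbc driver is on the classpath, as in the test).
    SQLCLI cli = new SQLCLI(conf);
    cli.run(new String[] {"-p", omDbPath, "-o", dbOutPath});

    Class.forName("org.sqlite.JDBC");
    try (Connection conn =
             DriverManager.getConnection("jdbc:sqlite:" + dbOutPath);
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery("SELECT * FROM volumeInfo")) {
      while (rs.next()) {
        System.out.println(rs.getString("volumeName"));
      }
    }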




[11/50] [abbrv] hadoop git commit: HADOOP-15571. Multiple FileContexts created with the same configuration object should be allowed to have different umask. Contributed by Vinod Kumar Vavilapalli.

Posted by bh...@apache.org.
HADOOP-15571. Multiple FileContexts created with the same configuration object should be allowed to have different umask. Contributed by Vinod Kumar Vavilapalli.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/498e3bfb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/498e3bfb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/498e3bfb

Branch: refs/heads/HDDS-48
Commit: 498e3bfb6b93bf542e5581d83e64e920983fe87e
Parents: a129e3e
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Thu Jul 5 14:19:05 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Fri Jul 6 11:56:09 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/FileContext.java  |  9 ++--
 .../org/apache/hadoop/fs/TestFileContext.java   | 44 +++++++++++++++++++-
 .../logaggregation/AggregatedLogFormat.java     |  6 +--
 3 files changed, 49 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
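
Editorial note (not part of the patch): in practical terms, a umask set explicitly on one FileContext no longer leaks into other contexts that share the same Configuration, while contexts that never call setUMask() keep tracking the configured value. A minimal sketch using the same APIs the new test below exercises (fragment assumed to run inside a method declared throws Exception):

    Configuration conf = new Configuration();
    FileContext fc1 = FileContext.getFileContext(new URI("file:///"), conf);
    FileContext fc2 = FileContext.getFileContext(new URI("file:///"), conf);

    // fc1 pins its own umask; fc2 keeps following the configuration value.
    fc1.setUMask(FsPermission.createImmutable((short) 0033));
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");

    assert fc1.getUMask().toShort() == 0033;  // explicit per-context umask wins
    assert fc2.getUMask().toShort() == 0077;  // still driven by the conf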


http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e3bfb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 5215c3c..0b3889b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -219,10 +219,12 @@ public class FileContext {
    * The FileContext is defined by.
    *  1) defaultFS (slash)
    *  2) wd
-   *  3) umask (Obtained by FsPermission.getUMask(conf))
+   *  3) umask (explicitly set via setUMask(),
+   *      falling back to FsPermission.getUMask(conf))
    */   
   private final AbstractFileSystem defaultFS; //default FS for this FileContext.
   private Path workingDir;          // Fully qualified
+  private FsPermission umask;
   private final Configuration conf;
   private final UserGroupInformation ugi;
   final boolean resolveSymlinks;
@@ -575,7 +577,7 @@ public class FileContext {
    * @return the umask of this FileContext
    */
   public FsPermission getUMask() {
-    return FsPermission.getUMask(conf);
+    return (umask != null ? umask : FsPermission.getUMask(conf));
   }
   
   /**
@@ -583,10 +585,9 @@ public class FileContext {
    * @param newUmask  the new umask
    */
   public void setUMask(final FsPermission newUmask) {
-    FsPermission.setUMask(conf, newUmask);
+    this.umask = newUmask;
   }
   
-  
   /**
    * Resolve the path following any symlinks or mount points
    * @param f to be resolved

http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e3bfb/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java
index f5fb06f..60b24c7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java
@@ -17,13 +17,17 @@
  */
 package org.apache.hadoop.fs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.net.URI;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.junit.Assert.fail;
-
 public class TestFileContext {
   private static final Logger LOG = LoggerFactory.getLogger(TestFileContext
       .class);
@@ -39,4 +43,40 @@ public class TestFileContext {
       LOG.info("Expected exception: ", ufse);
     }
   }
+
+  @Test
+  public void testConfBasedAndAPIBasedSetUMask() throws Exception {
+
+    Configuration conf = new Configuration();
+
+    String defaultlUMask =
+        conf.get(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY);
+    assertEquals("Default UMask changed!", "022", defaultlUMask);
+
+    URI uri1 = new URI("file://mydfs:50070/");
+    URI uri2 = new URI("file://tmp");
+
+    FileContext fc1 = FileContext.getFileContext(uri1, conf);
+    FileContext fc2 = FileContext.getFileContext(uri2, conf);
+    assertEquals("Umask for fc1 is incorrect", 022, fc1.getUMask().toShort());
+    assertEquals("Umask for fc2 is incorrect", 022, fc2.getUMask().toShort());
+
+    // Until a user explicitly calls FileContext.setUMask(), updates made
+    // through the configuration should be reflected.
+    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "011");
+    assertEquals("Umask for fc1 is incorrect", 011, fc1.getUMask().toShort());
+    assertEquals("Umask for fc2 is incorrect", 011, fc2.getUMask().toShort());
+
+    // Stop reflecting the conf update for specific FileContexts, once an
+    // explicit setUMask is done.
+    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "066");
+    fc1.setUMask(FsPermission.createImmutable((short) 00033));
+    assertEquals("Umask for fc1 is incorrect", 033, fc1.getUMask().toShort());
+    assertEquals("Umask for fc2 is incorrect", 066, fc2.getUMask().toShort());
+
+    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
+    fc2.setUMask(FsPermission.createImmutable((short) 00044));
+    assertEquals("Umask for fc1 is incorrect", 033, fc1.getUMask().toShort());
+    assertEquals("Umask for fc2 is incorrect", 044, fc2.getUMask().toShort());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e3bfb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index 81d5053..4ee5c8a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -96,9 +96,6 @@ public class AggregatedLogFormat {
    */
   private static final FsPermission APP_LOG_FILE_UMASK = FsPermission
       .createImmutable((short) (0640 ^ 0777));
-  /** Default permission for the log file. */
-  private static final FsPermission APP_LOG_FILE_PERM =
-      FsPermission.getFileDefault().applyUMask(APP_LOG_FILE_UMASK);
 
   static {
     RESERVED_KEYS = new HashMap<String, AggregatedLogFormat.LogKey>();
@@ -477,10 +474,11 @@ public class AggregatedLogFormat {
               @Override
               public FSDataOutputStream run() throws Exception {
                 fc = FileContext.getFileContext(remoteAppLogFile.toUri(), conf);
+                fc.setUMask(APP_LOG_FILE_UMASK);
                 return fc.create(
                     remoteAppLogFile,
                     EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
-                    Options.CreateOpts.perms(APP_LOG_FILE_PERM));
+                    new Options.CreateOpts[] {});
               }
             });
       } catch (InterruptedException e) {




[30/50] [abbrv] hadoop git commit: HDDS-217. Move all SCMEvents to a package. Contributed by Anu Engineer.

Posted by bh...@apache.org.
HDDS-217. Move all SCMEvents to a package.
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2f51cd60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2f51cd60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2f51cd60

Branch: refs/heads/HDDS-48
Commit: 2f51cd60ef082cd0360fe46e9d2a4ec9b8ed979a
Parents: 936e0df
Author: Anu Engineer <ae...@apache.org>
Authored: Sun Jul 8 11:11:21 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Sun Jul 8 11:11:21 2018 -0700

----------------------------------------------------------------------
 .../container/CloseContainerEventHandler.java   | 13 ++--
 .../hadoop/hdds/scm/events/SCMEvents.java       | 80 ++++++++++++++++++++
 .../hadoop/hdds/scm/events/package-info.java    | 23 ++++++
 .../hadoop/hdds/scm/node/SCMNodeManager.java    |  5 +-
 .../server/SCMDatanodeHeartbeatDispatcher.java  | 11 ++-
 .../scm/server/StorageContainerManager.java     |  7 +-
 .../TestCloseContainerEventHandler.java         | 27 ++++---
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 10 +--
 .../TestSCMDatanodeHeartbeatDispatcher.java     | 20 +++--
 9 files changed, 147 insertions(+), 49 deletions(-)
----------------------------------------------------------------------
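
Editorial note (not part of the patch): the net effect for callers is that producers and consumers of SCM events now share the constants defined in SCMEvents instead of per-class event definitions. A minimal wiring sketch, mirroring the updated tests below (mapping and containerId are placeholders for a ContainerMapping and a container id):

    EventQueue eventQueue = new EventQueue();
    eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER,
        new CloseContainerEventHandler(mapping));

    // Any SCM component can now fire the same event by its shared name.
    eventQueue.fireEvent(SCMEvents.CLOSE_CONTAINER, new ContainerID(containerId));
    eventQueue.processAll(1000);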


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
index 7b24538..f1053d5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -24,15 +24,14 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.TypedEvent;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * In case of a node failure, volume failure, volume out of space, node
- * out of space etc, CLOSE_CONTAINER_EVENT will be triggered.
- * CloseContainerEventHandler is the handler for CLOSE_CONTAINER_EVENT.
+ * out of space etc, CLOSE_CONTAINER will be triggered.
+ * CloseContainerEventHandler is the handler for CLOSE_CONTAINER.
  * When a close container event is fired, a close command for the container
  * should be sent to all the datanodes in the pipeline and containerStateManager
  * needs to update the container state to Closing.
@@ -42,8 +41,6 @@ public class CloseContainerEventHandler implements EventHandler<ContainerID> {
   public static final Logger LOG =
       LoggerFactory.getLogger(CloseContainerEventHandler.class);
 
-  public static final TypedEvent<ContainerID> CLOSE_CONTAINER_EVENT =
-            new TypedEvent<>(ContainerID.class);
 
   private final Mapping containerManager;
 
@@ -59,7 +56,8 @@ public class CloseContainerEventHandler implements EventHandler<ContainerID> {
     ContainerWithPipeline containerWithPipeline = null;
     ContainerInfo info;
     try {
-      containerWithPipeline = containerManager.getContainerWithPipeline(containerID.getId());
+      containerWithPipeline =
+          containerManager.getContainerWithPipeline(containerID.getId());
       info = containerWithPipeline.getContainerInfo();
       if (info == null) {
         LOG.info("Failed to update the container state. Container with id : {} "
@@ -73,7 +71,8 @@ public class CloseContainerEventHandler implements EventHandler<ContainerID> {
     }
 
     if (info.getState() == HddsProtos.LifeCycleState.OPEN) {
-      for (DatanodeDetails datanode : containerWithPipeline.getPipeline().getMachines()) {
+      for (DatanodeDetails datanode :
+          containerWithPipeline.getPipeline().getMachines()) {
         containerManager.getNodeManager().addDatanodeCommand(datanode.getUuid(),
             new CloseContainerCommand(containerID.getId(),
                 info.getReplicationType()));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
new file mode 100644
index 0000000..2c9c431
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.events;
+
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
+
+import org.apache.hadoop.hdds.server.events.Event;
+import org.apache.hadoop.hdds.server.events.TypedEvent;
+import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+
+/**
+ * Class that acts as the namespace for all SCM Events.
+ */
+public final class SCMEvents {
+
+  /**
+   * NodeReports are sent out by Datanodes. This report is
+   * received by SCMDatanodeHeartbeatDispatcher and a NodeReport Event is
+   * generated.
+   */
+  public static final TypedEvent<NodeReportFromDatanode> NODE_REPORT =
+      new TypedEvent<>(NodeReportFromDatanode.class, "Node_Report");
+  /**
+   * ContainerReports are sent out by Datanodes. This report
+   * is received by SCMDatanodeHeartbeatDispatcher and a Container_Report Event
+   * is generated.
+   */
+  public static final TypedEvent<ContainerReportFromDatanode> CONTAINER_REPORT =
+      new TypedEvent<>(ContainerReportFromDatanode.class, "Container_Report");
+
+  /**
+   * Whenever a command for the Datanode needs to be issued by any component
+   * inside SCM, a Datanode_Command event is generated. NodeManager listens
+   * to these events and dispatches them to the Datanode for further processing.
+   */
+  public static final Event<CommandForDatanode> DATANODE_COMMAND =
+      new TypedEvent<>(CommandForDatanode.class, "Datanode_Command");
+
+  /**
+   * A Close Container Event can be triggered under many conditions.
+   * Some of them are:
+   *    1. A Container is full: we stop writing further information to
+   *    that container. DNs let SCM know the current state and send an
+   *    informational message that allows SCM to close the container.
+   *
+   *    2. If a pipeline is open (for example Ratis) and a single node fails,
+   *    we will proactively close these containers.
+   *
+   *  Once a command is dispatched to the DN, we will also listen to updates
+   *  from the datanode, which let us know that the command completed or timed out.
+   */
+  public static final TypedEvent<ContainerID> CLOSE_CONTAINER =
+      new TypedEvent<>(ContainerID.class, "Close_Container");
+
+  /**
+   * Private Ctor. Never Constructed.
+   */
+  private SCMEvents() {
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java
new file mode 100644
index 0000000..46181a3
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Events Package contains all the Events used by SCM internally to
+ * communicate between different sub-systems that make up SCM.
+ */
+package org.apache.hadoop.hdds.scm.events;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 15ac3f2..664a80f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -25,10 +25,8 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.VersionInfo;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.server.events.Event;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.TypedEvent;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
@@ -118,8 +116,7 @@ public class SCMNodeManager
   // Node pool manager.
   private final StorageContainerManager scmManager;
 
-  public static final Event<CommandForDatanode> DATANODE_COMMAND =
-      new TypedEvent<>(CommandForDatanode.class, "DATANODE_COMMAND");
+
 
   /**
    * Constructs SCM machine Manager.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index f221584..a6354af 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -25,12 +25,14 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.TypedEvent;
 
 import com.google.protobuf.GeneratedMessage;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT;
+
 /**
  * This class is responsible for dispatching heartbeat from datanode to
  * appropriate EventHandler at SCM.
@@ -42,11 +44,6 @@ public final class SCMDatanodeHeartbeatDispatcher {
 
   private EventPublisher eventPublisher;
 
-  public static final TypedEvent<NodeReportFromDatanode> NODE_REPORT =
-      new TypedEvent<>(NodeReportFromDatanode.class);
-
-  public static final TypedEvent<ContainerReportFromDatanode> CONTAINER_REPORT =
-      new TypedEvent<ContainerReportFromDatanode>(ContainerReportFromDatanode.class);
 
   public SCMDatanodeHeartbeatDispatcher(EventPublisher eventPublisher) {
     this.eventPublisher = eventPublisher;
@@ -63,12 +60,14 @@ public final class SCMDatanodeHeartbeatDispatcher {
         DatanodeDetails.getFromProtoBuf(heartbeat.getDatanodeDetails());
     // should we dispatch heartbeat through eventPublisher?
     if (heartbeat.hasNodeReport()) {
+      LOG.debug("Dispatching Node Report.");
       eventPublisher.fireEvent(NODE_REPORT,
           new NodeReportFromDatanode(datanodeDetails,
               heartbeat.getNodeReport()));
     }
 
     if (heartbeat.hasContainerReport()) {
+      LOG.debug("Dispatching Container Report.");
       eventPublisher.fireEvent(CONTAINER_REPORT,
           new ContainerReportFromDatanode(datanodeDetails,
               heartbeat.getContainerReport()));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 568a86a..49d3a40 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -70,6 +70,8 @@ import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
+
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
@@ -164,9 +166,10 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     }
     EventQueue eventQueue = new EventQueue();
 
-    SCMNodeManager nm = new SCMNodeManager(conf, scmStorage.getClusterID(), this);
+    SCMNodeManager nm =
+        new SCMNodeManager(conf, scmStorage.getClusterID(), this);
     scmNodeManager = nm;
-    eventQueue.addHandler(SCMNodeManager.DATANODE_COMMAND, nm);
+    eventQueue.addHandler(DATANODE_COMMAND, nm);
 
     scmContainerManager = new ContainerMapping(conf, getScmNodeManager(),
         cacheSize);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index 721dbf6..0d46ffa 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -17,11 +17,13 @@
 
 package org.apache.hadoop.hdds.scm.container;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers
+    .ContainerWithPipeline;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
@@ -33,12 +35,12 @@ import org.junit.Test;
 
 import java.io.File;
 import java.io.IOException;
-import java.util.Random;
 
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CREATE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CREATED;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER;
 
 /**
  * Tests the closeContainerEventHandler class.
@@ -65,7 +67,7 @@ public class TestCloseContainerEventHandler {
     nodeManager = new MockNodeManager(true, 10);
     mapping = new ContainerMapping(configuration, nodeManager, 128);
     eventQueue = new EventQueue();
-    eventQueue.addHandler(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
+    eventQueue.addHandler(CLOSE_CONTAINER,
         new CloseContainerEventHandler(mapping));
   }
 
@@ -81,8 +83,8 @@ public class TestCloseContainerEventHandler {
   public void testIfCloseContainerEventHadnlerInvoked() {
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
-    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
-        new ContainerID(Math.abs(new Random().nextLong())));
+    eventQueue.fireEvent(CLOSE_CONTAINER,
+        new ContainerID(Math.abs(RandomUtils.nextInt())));
     eventQueue.processAll(1000);
     Assert.assertTrue(logCapturer.getOutput()
         .contains("Close container Event triggered for container"));
@@ -90,10 +92,10 @@ public class TestCloseContainerEventHandler {
 
   @Test
   public void testCloseContainerEventWithInvalidContainer() {
-    long id = Math.abs(new Random().nextLong());
+    long id = Math.abs(RandomUtils.nextInt());
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
-    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
+    eventQueue.fireEvent(CLOSE_CONTAINER,
         new ContainerID(id));
     eventQueue.processAll(1000);
     Assert.assertTrue(logCapturer.getOutput()
@@ -112,7 +114,7 @@ public class TestCloseContainerEventHandler {
         containerWithPipeline.getContainerInfo().getContainerID());
     DatanodeDetails datanode = containerWithPipeline.getPipeline().getLeader();
     int closeCount = nodeManager.getCommandCount(datanode);
-    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
+    eventQueue.fireEvent(CLOSE_CONTAINER, id);
     eventQueue.processAll(1000);
     // At this point of time, the allocated container is not in open
     // state, so firing close container event should not queue CLOSE
@@ -125,11 +127,12 @@ public class TestCloseContainerEventHandler {
     //Execute these state transitions so that we can close the container.
     mapping.updateContainerState(id.getId(), CREATE);
     mapping.updateContainerState(id.getId(), CREATED);
-    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
+    eventQueue.fireEvent(CLOSE_CONTAINER,
         new ContainerID(
             containerWithPipeline.getContainerInfo().getContainerID()));
     eventQueue.processAll(1000);
-    Assert.assertEquals(closeCount + 1, nodeManager.getCommandCount(datanode));
+    Assert.assertEquals(closeCount + 1,
+        nodeManager.getCommandCount(datanode));
     Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
         mapping.getStateManager().getContainer(id).getState());
   }
@@ -145,7 +148,7 @@ public class TestCloseContainerEventHandler {
     ContainerID id = new ContainerID(
         containerWithPipeline.getContainerInfo().getContainerID());
     int[] closeCount = new int[3];
-    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
+    eventQueue.fireEvent(CLOSE_CONTAINER, id);
     eventQueue.processAll(1000);
     int i = 0;
     for (DatanodeDetails details : containerWithPipeline.getPipeline()
@@ -166,7 +169,7 @@ public class TestCloseContainerEventHandler {
     //Execute these state transitions so that we can close the container.
     mapping.updateContainerState(id.getId(), CREATE);
     mapping.updateContainerState(id.getId(), CREATED);
-    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
+    eventQueue.fireEvent(CLOSE_CONTAINER, id);
     eventQueue.processAll(1000);
     i = 0;
     // Make sure close is queued for each datanode on the pipeline

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index 0a4e33d..d72309e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -68,6 +68,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
     .HEALTHY;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.core.StringStartsWith.startsWith;
 import static org.junit.Assert.assertEquals;
@@ -1068,11 +1069,6 @@ public class TestNodeManager {
       foundRemaining = nodeManager.getStats().getRemaining().get();
       assertEquals(0, foundRemaining);
 
-      // Send a new report to bring the dead node back to healthy
-      String storagePath = testDir.getAbsolutePath() + "/" + dnId;
-      List<StorageReportProto> reports = TestUtils
-          .createStorageReport(capacity, expectedScmUsed, expectedRemaining,
-              storagePath, null, dnId, 1);
       nodeManager.processHeartbeat(datanodeDetails);
 
       // Wait up to 5 seconds so that the dead node becomes healthy
@@ -1111,11 +1107,11 @@ public class TestNodeManager {
 
     EventQueue eq = new EventQueue();
     try (SCMNodeManager nodemanager = createNodeManager(conf)) {
-      eq.addHandler(SCMNodeManager.DATANODE_COMMAND, nodemanager);
+      eq.addHandler(DATANODE_COMMAND, nodemanager);
 
       nodemanager
           .register(datanodeDetails, TestUtils.createNodeReport(reports));
-      eq.fireEvent(SCMNodeManager.DATANODE_COMMAND,
+      eq.fireEvent(DATANODE_COMMAND,
           new CommandForDatanode(datanodeDetails.getUuid(),
               new CloseContainerCommand(1L, ReplicationType.STAND_ALONE)));
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
index 326a34b..a77ed04 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdds.scm.server;
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
@@ -40,6 +38,9 @@ import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.junit.Assert;
 import org.junit.Test;
 
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT;
+
 /**
  * This class tests the behavior of SCMDatanodeHeartbeatDispatcher.
  */
@@ -49,8 +50,6 @@ public class TestSCMDatanodeHeartbeatDispatcher {
   @Test
   public void testNodeReportDispatcher() throws IOException {
 
-    Configuration conf = new OzoneConfiguration();
-
     AtomicInteger eventReceived = new AtomicInteger();
 
     NodeReportProto nodeReport = NodeReportProto.getDefaultInstance();
@@ -60,10 +59,10 @@ public class TestSCMDatanodeHeartbeatDispatcher {
           @Override
           public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
               EVENT_TYPE event, PAYLOAD payload) {
-            Assert.assertEquals(event,
-                SCMDatanodeHeartbeatDispatcher.NODE_REPORT);
+            Assert.assertEquals(event, NODE_REPORT);
             eventReceived.incrementAndGet();
-            Assert.assertEquals(nodeReport, ((NodeReportFromDatanode)payload).getReport());
+            Assert.assertEquals(nodeReport,
+                ((NodeReportFromDatanode)payload).getReport());
 
           }
         });
@@ -84,7 +83,6 @@ public class TestSCMDatanodeHeartbeatDispatcher {
   @Test
   public void testContainerReportDispatcher() throws IOException {
 
-    Configuration conf = new OzoneConfiguration();
 
     AtomicInteger eventReceived = new AtomicInteger();
 
@@ -96,9 +94,9 @@ public class TestSCMDatanodeHeartbeatDispatcher {
           @Override
           public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
               EVENT_TYPE event, PAYLOAD payload) {
-            Assert.assertEquals(event,
-                SCMDatanodeHeartbeatDispatcher.CONTAINER_REPORT);
-            Assert.assertEquals(containerReport, ((ContainerReportFromDatanode)payload).getReport());
+            Assert.assertEquals(event, CONTAINER_REPORT);
+            Assert.assertEquals(containerReport,
+                ((ContainerReportFromDatanode)payload).getReport());
             eventReceived.incrementAndGet();
           }
         });




[36/50] [abbrv] hadoop git commit: YARN-8506. Make GetApplicationsRequestPBImpl thread safe. (wangda)

Posted by bh...@apache.org.
YARN-8506. Make GetApplicationsRequestPBImpl thread safe. (wangda)

Change-Id: If304567abb77a01b686d82c769bdf50728484163


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83cd84b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83cd84b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83cd84b7

Branch: refs/heads/HDDS-48
Commit: 83cd84b70bac7b613eb4b2901d5ffe40098692eb
Parents: 0838fe8
Author: Wangda Tan <wa...@apache.org>
Authored: Mon Jul 9 11:30:08 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Mon Jul 9 11:30:08 2018 -0700

----------------------------------------------------------------------
 .../impl/pb/GetApplicationsRequestPBImpl.java   | 44 ++++++++++----------
 1 file changed, 22 insertions(+), 22 deletions(-)
----------------------------------------------------------------------
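
Editorial note (not part of the patch): GetApplicationsRequestPBImpl lazily merges its local fields into the protobuf builder when getProto() is called, so an unsynchronized getter racing with a setter can observe a half-merged builder. A rough sketch of the kind of sharing this protects against (illustrative only; the request object is assumed to be shared between threads):

    GetApplicationsRequest shared = GetApplicationsRequest.newInstance();
    // Thread A serializes the request, which triggers mergeLocalToProto().
    new Thread(() -> ((GetApplicationsRequestPBImpl) shared).getProto()).start();
    // Thread B keeps mutating it concurrently; with this change both calls
    // now serialize on the PBImpl's monitor.
    new Thread(() -> shared.setLimit(100L)).start();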


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83cd84b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
index a6abb99..4c5fee0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
@@ -65,7 +65,7 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
     viaProto = true;
   }
 
-  public GetApplicationsRequestProto getProto() {
+  public synchronized GetApplicationsRequestProto getProto() {
     mergeLocalToProto();
     proto = viaProto ? proto : builder.build();
     viaProto = true;
@@ -175,13 +175,13 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
   }
 
   @Override
-  public Set<String> getApplicationTypes() {
+  public synchronized Set<String> getApplicationTypes() {
     initApplicationTypes();
     return this.applicationTypes;
   }
 
   @Override
-  public void setApplicationTypes(Set<String> applicationTypes) {
+  public synchronized void setApplicationTypes(Set<String> applicationTypes) {
     maybeInitBuilder();
     if (applicationTypes == null)
       builder.clearApplicationTypes();
@@ -198,13 +198,13 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
   }
 
   @Override
-  public Set<String> getApplicationTags() {
+  public synchronized Set<String> getApplicationTags() {
     initApplicationTags();
     return this.applicationTags;
   }
 
   @Override
-  public void setApplicationTags(Set<String> tags) {
+  public synchronized void setApplicationTags(Set<String> tags) {
     maybeInitBuilder();
     if (tags == null || tags.isEmpty()) {
       builder.clearApplicationTags();
@@ -219,7 +219,7 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
   }
 
   @Override
-  public EnumSet<YarnApplicationState> getApplicationStates() {
+  public synchronized EnumSet<YarnApplicationState> getApplicationStates() {
     initApplicationStates();
     return this.applicationStates;
   }
@@ -233,12 +233,12 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
   }
 
   @Override
-  public ApplicationsRequestScope getScope() {
+  public synchronized ApplicationsRequestScope getScope() {
     initScope();
     return this.scope;
   }
 
-  public void setScope(ApplicationsRequestScope scope) {
+  public synchronized void setScope(ApplicationsRequestScope scope) {
     maybeInitBuilder();
     if (scope == null) {
       builder.clearScope();
@@ -247,7 +247,7 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
   }
 
   @Override
-  public void setApplicationStates(EnumSet<YarnApplicationState> applicationStates) {
+  public synchronized void setApplicationStates(EnumSet<YarnApplicationState> applicationStates) {
     maybeInitBuilder();
     if (applicationStates == null) {
       builder.clearApplicationStates();
@@ -256,7 +256,7 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
   }
 
   @Override
-  public void setApplicationStates(Set<String> applicationStates) {
+  public synchronized void setApplicationStates(Set<String> applicationStates) {
     EnumSet<YarnApplicationState> appStates = null;
     for (YarnApplicationState state : YarnApplicationState.values()) {
       if (applicationStates.contains(
@@ -272,12 +272,12 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
   }
 
   @Override
-  public Set<String> getUsers() {
+  public synchronized Set<String> getUsers() {
     initUsers();
     return this.users;
   }
 
-  public void setUsers(Set<String> users) {
+  public synchronized void setUsers(Set<String> users) {
     maybeInitBuilder();
     if (users == null) {
       builder.clearUsers();
@@ -286,13 +286,13 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
   }
 
   @Override
-  public Set<String> getQueues() {
+  public synchronized Set<String> getQueues() {
     initQueues();
     return this.queues;
   }
 
   @Override
-  public void setQueues(Set<String> queues) {
+  public synchronized void setQueues(Set<String> queues) {
     maybeInitBuilder();
     if (queues == null) {
       builder.clearQueues();
@@ -301,7 +301,7 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
   }
 
   @Override
-  public long getLimit() {
+  public synchronized long getLimit() {
     if (this.limit == Long.MAX_VALUE) {
       GetApplicationsRequestProtoOrBuilder p = viaProto ? proto : builder;
       this.limit = p.hasLimit() ? p.getLimit() : Long.MAX_VALUE;
@@ -310,13 +310,13 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
   }
 
   @Override
-  public void setLimit(long limit) {
+  public synchronized void setLimit(long limit) {
     maybeInitBuilder();
     this.limit = limit;
   }
 
   @Override
-  public Range<Long> getStartRange() {
+  public synchronized Range<Long> getStartRange() {
     if (this.start == null) {
       GetApplicationsRequestProtoOrBuilder p = viaProto ? proto: builder;
       if (p.hasStartBegin() || p.hasStartEnd()) {
@@ -329,12 +329,12 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
   }
 
   @Override
-  public void setStartRange(Range<Long> range) {
+  public synchronized void setStartRange(Range<Long> range) {
     this.start = range;
   }
 
   @Override
-  public void setStartRange(long begin, long end)
+  public synchronized void setStartRange(long begin, long end)
       throws IllegalArgumentException {
     if (begin > end) {
       throw new IllegalArgumentException("begin > end in range (begin, " +
@@ -344,7 +344,7 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
   }
 
   @Override
-  public Range<Long> getFinishRange() {
+  public synchronized Range<Long> getFinishRange() {
     if (this.finish == null) {
       GetApplicationsRequestProtoOrBuilder p = viaProto ? proto: builder;
       if (p.hasFinishBegin() || p.hasFinishEnd()) {
@@ -357,12 +357,12 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
   }
 
   @Override
-  public void setFinishRange(Range<Long> range) {
+  public synchronized void setFinishRange(Range<Long> range) {
     this.finish = range;
   }
 
   @Override
-  public void setFinishRange(long begin, long end) {
+  public synchronized void setFinishRange(long begin, long end) {
     if (begin > end) {
       throw new IllegalArgumentException("begin > end in range (begin, " +
           "end): (" + begin + ", " + end + ")");




[29/50] [abbrv] hadoop git commit: HDFS-13721. NPE in DataNode due to uninitialized DiskBalancer.

Posted by bh...@apache.org.
HDFS-13721. NPE in DataNode due to uninitialized DiskBalancer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/936e0df0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/936e0df0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/936e0df0

Branch: refs/heads/HDDS-48
Commit: 936e0df0d344f13eea97fe624b154e8356cdea7c
Parents: ba68320
Author: Xiao Chen <xi...@apache.org>
Authored: Fri Jul 6 20:45:27 2018 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri Jul 6 21:00:32 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/datanode/DataNode.java    | 19 +++++++++++++------
 .../server/diskbalancer/TestDiskBalancer.java    |  5 +++++
 2 files changed, 18 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
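
Editorial note (not part of the patch): the DiskBalancer entry points in DataNode now reach the balancer through a null-checking accessor, so a DataNode whose DiskBalancer was never initialized (or was already shut down) reports a clean error instead of throwing a NullPointerException. The caller-visible behaviour, mirroring the test change below (dnNode is a DataNode handle from the test's MiniDFSCluster):

    dnNode.shutdown();
    // Previously this path dereferenced a null diskBalancer and threw an NPE;
    // now an IOException is raised internally and the MXBean returns "".
    assertEquals("", dnNode.getDiskBalancerStatus());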


http://git-wip-us.apache.org/repos/asf/hadoop/blob/936e0df0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 4baafb9..7df92f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3132,7 +3132,7 @@ public class DataNode extends ReconfigurableBase
   @Override // DataNodeMXBean
   public String getDiskBalancerStatus() {
     try {
-      return this.diskBalancer.queryWorkStatus().toJsonString();
+      return getDiskBalancer().queryWorkStatus().toJsonString();
     } catch (IOException ex) {
       LOG.debug("Reading diskbalancer Status failed. ex:{}", ex);
       return "";
@@ -3510,7 +3510,7 @@ public class DataNode extends ReconfigurableBase
           DiskBalancerException.Result.DATANODE_STATUS_NOT_REGULAR);
     }
 
-    this.diskBalancer.submitPlan(planID, planVersion, planFile, planData,
+    getDiskBalancer().submitPlan(planID, planVersion, planFile, planData,
             skipDateCheck);
   }
 
@@ -3522,7 +3522,7 @@ public class DataNode extends ReconfigurableBase
   public void cancelDiskBalancePlan(String planID) throws
       IOException {
     checkSuperuserPrivilege();
-    this.diskBalancer.cancelPlan(planID);
+    getDiskBalancer().cancelPlan(planID);
   }
 
   /**
@@ -3533,7 +3533,7 @@ public class DataNode extends ReconfigurableBase
   @Override
   public DiskBalancerWorkStatus queryDiskBalancerPlan() throws IOException {
     checkSuperuserPrivilege();
-    return this.diskBalancer.queryWorkStatus();
+    return getDiskBalancer().queryWorkStatus();
   }
 
   /**
@@ -3550,9 +3550,9 @@ public class DataNode extends ReconfigurableBase
     Preconditions.checkNotNull(key);
     switch (key) {
     case DiskBalancerConstants.DISKBALANCER_VOLUME_NAME:
-      return this.diskBalancer.getVolumeNames();
+      return getDiskBalancer().getVolumeNames();
     case DiskBalancerConstants.DISKBALANCER_BANDWIDTH :
-      return Long.toString(this.diskBalancer.getBandwidth());
+      return Long.toString(getDiskBalancer().getBandwidth());
     default:
       LOG.error("Disk Balancer - Unknown key in get balancer setting. Key: {}",
           key);
@@ -3606,4 +3606,11 @@ public class DataNode extends ReconfigurableBase
     }
     return volumeInfoList;
   }
+
+  private DiskBalancer getDiskBalancer() throws IOException {
+    if (this.diskBalancer == null) {
+      throw new IOException("DiskBalancer is not initialized");
+    }
+    return this.diskBalancer;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/936e0df0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index 55cc57e..e789694 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -109,6 +109,11 @@ public class TestDiskBalancer {
           .getFsVolumeReferences()) {
         assertEquals(ref.size(), dbDnNode.getVolumeCount());
       }
+
+      // Shut down the DN first, to verify that calling DiskBalancer APIs on
+      // an uninitialized DN doesn't throw an NPE.
+      dnNode.shutdown();
+      assertEquals("", dnNode.getDiskBalancerStatus());
     } finally {
       cluster.shutdown();
     }




[15/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
new file mode 100644
index 0000000..2d04452
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
@@ -0,0 +1,459 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+
+/**
+ * This class is for maintaining Ozone Manager statistics.
+ */
+@InterfaceAudience.Private
+@Metrics(about="Ozone Manager Metrics", context="dfs")
+public class OMMetrics {
+  private static final String SOURCE_NAME =
+      OMMetrics.class.getSimpleName();
+
+  // OM request type op metrics
+  private @Metric MutableCounterLong numVolumeOps;
+  private @Metric MutableCounterLong numBucketOps;
+  private @Metric MutableCounterLong numKeyOps;
+
+  // OM op metrics
+  private @Metric MutableCounterLong numVolumeCreates;
+  private @Metric MutableCounterLong numVolumeUpdates;
+  private @Metric MutableCounterLong numVolumeInfos;
+  private @Metric MutableCounterLong numVolumeCheckAccesses;
+  private @Metric MutableCounterLong numBucketCreates;
+  private @Metric MutableCounterLong numVolumeDeletes;
+  private @Metric MutableCounterLong numBucketInfos;
+  private @Metric MutableCounterLong numBucketUpdates;
+  private @Metric MutableCounterLong numBucketDeletes;
+  private @Metric MutableCounterLong numKeyAllocate;
+  private @Metric MutableCounterLong numKeyLookup;
+  private @Metric MutableCounterLong numKeyRenames;
+  private @Metric MutableCounterLong numKeyDeletes;
+  private @Metric MutableCounterLong numBucketLists;
+  private @Metric MutableCounterLong numKeyLists;
+  private @Metric MutableCounterLong numVolumeLists;
+  private @Metric MutableCounterLong numKeyCommits;
+  private @Metric MutableCounterLong numAllocateBlockCalls;
+  private @Metric MutableCounterLong numGetServiceLists;
+
+  // Failure Metrics
+  private @Metric MutableCounterLong numVolumeCreateFails;
+  private @Metric MutableCounterLong numVolumeUpdateFails;
+  private @Metric MutableCounterLong numVolumeInfoFails;
+  private @Metric MutableCounterLong numVolumeDeleteFails;
+  private @Metric MutableCounterLong numBucketCreateFails;
+  private @Metric MutableCounterLong numVolumeCheckAccessFails;
+  private @Metric MutableCounterLong numBucketInfoFails;
+  private @Metric MutableCounterLong numBucketUpdateFails;
+  private @Metric MutableCounterLong numBucketDeleteFails;
+  private @Metric MutableCounterLong numKeyAllocateFails;
+  private @Metric MutableCounterLong numKeyLookupFails;
+  private @Metric MutableCounterLong numKeyRenameFails;
+  private @Metric MutableCounterLong numKeyDeleteFails;
+  private @Metric MutableCounterLong numBucketListFails;
+  private @Metric MutableCounterLong numKeyListFails;
+  private @Metric MutableCounterLong numVolumeListFails;
+  private @Metric MutableCounterLong numKeyCommitFails;
+  private @Metric MutableCounterLong numBlockAllocateCallFails;
+  private @Metric MutableCounterLong numGetServiceListFails;
+
+  public OMMetrics() {
+  }
+
+  public static OMMetrics create() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    return ms.register(SOURCE_NAME,
+        "Oozne Manager Metrics",
+        new OMMetrics());
+  }
+
+  public void incNumVolumeCreates() {
+    numVolumeOps.incr();
+    numVolumeCreates.incr();
+  }
+
+  public void incNumVolumeUpdates() {
+    numVolumeOps.incr();
+    numVolumeUpdates.incr();
+  }
+
+  public void incNumVolumeInfos() {
+    numVolumeOps.incr();
+    numVolumeInfos.incr();
+  }
+
+  public void incNumVolumeDeletes() {
+    numVolumeOps.incr();
+    numVolumeDeletes.incr();
+  }
+
+  public void incNumVolumeCheckAccesses() {
+    numVolumeOps.incr();
+    numVolumeCheckAccesses.incr();
+  }
+
+  public void incNumBucketCreates() {
+    numBucketOps.incr();
+    numBucketCreates.incr();
+  }
+
+  public void incNumBucketInfos() {
+    numBucketOps.incr();
+    numBucketInfos.incr();
+  }
+
+  public void incNumBucketUpdates() {
+    numBucketOps.incr();
+    numBucketUpdates.incr();
+  }
+
+  public void incNumBucketDeletes() {
+    numBucketOps.incr();
+    numBucketDeletes.incr();
+  }
+
+  public void incNumBucketLists() {
+    numBucketOps.incr();
+    numBucketLists.incr();
+  }
+
+  public void incNumKeyLists() {
+    numKeyOps.incr();
+    numKeyLists.incr();
+  }
+
+  public void incNumVolumeLists() {
+    numVolumeOps.incr();
+    numVolumeLists.incr();
+  }
+
+  public void incNumGetServiceLists() {
+    numGetServiceLists.incr();
+  }
+
+  public void incNumVolumeCreateFails() {
+    numVolumeCreateFails.incr();
+  }
+
+  public void incNumVolumeUpdateFails() {
+    numVolumeUpdateFails.incr();
+  }
+
+  public void incNumVolumeInfoFails() {
+    numVolumeInfoFails.incr();
+  }
+
+  public void incNumVolumeDeleteFails() {
+    numVolumeDeleteFails.incr();
+  }
+
+  public void incNumVolumeCheckAccessFails() {
+    numVolumeCheckAccessFails.incr();
+  }
+
+  public void incNumBucketCreateFails() {
+    numBucketCreateFails.incr();
+  }
+
+  public void incNumBucketInfoFails() {
+    numBucketInfoFails.incr();
+  }
+
+  public void incNumBucketUpdateFails() {
+    numBucketUpdateFails.incr();
+  }
+
+  public void incNumBucketDeleteFails() {
+    numBucketDeleteFails.incr();
+  }
+
+  public void incNumKeyAllocates() {
+    numKeyOps.incr();
+    numKeyAllocate.incr();
+  }
+
+  public void incNumKeyAllocateFails() {
+    numKeyAllocateFails.incr();
+  }
+
+  public void incNumKeyLookups() {
+    numKeyOps.incr();
+    numKeyLookup.incr();
+  }
+
+  public void incNumKeyLookupFails() {
+    numKeyLookupFails.incr();
+  }
+
+  public void incNumKeyRenames() {
+    numKeyOps.incr();
+    numKeyRenames.incr();
+  }
+
+  public void incNumKeyRenameFails() {
+    numKeyOps.incr();
+    numKeyRenameFails.incr();
+  }
+
+  public void incNumKeyDeleteFails() {
+    numKeyDeleteFails.incr();
+  }
+
+  public void incNumKeyDeletes() {
+    numKeyOps.incr();
+    numKeyDeletes.incr();
+  }
+
+  public void incNumKeyCommits() {
+    numKeyOps.incr();
+    numKeyCommits.incr();
+  }
+
+  public void incNumKeyCommitFails() {
+    numKeyCommitFails.incr();
+  }
+
+  public void incNumBlockAllocateCalls() {
+    numAllocateBlockCalls.incr();
+  }
+
+  public void incNumBlockAllocateCallFails() {
+    numBlockAllocateCallFails.incr();
+  }
+
+  public void incNumBucketListFails() {
+    numBucketListFails.incr();
+  }
+
+  public void incNumKeyListFails() {
+    numKeyListFails.incr();
+  }
+
+  public void incNumVolumeListFails() {
+    numVolumeListFails.incr();
+  }
+
+  public void incNumGetServiceListFails() {
+    numGetServiceListFails.incr();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeCreates() {
+    return numVolumeCreates.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeUpdates() {
+    return numVolumeUpdates.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeInfos() {
+    return numVolumeInfos.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeDeletes() {
+    return numVolumeDeletes.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeCheckAccesses() {
+    return numVolumeCheckAccesses.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketCreates() {
+    return numBucketCreates.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketInfos() {
+    return numBucketInfos.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketUpdates() {
+    return numBucketUpdates.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketDeletes() {
+    return numBucketDeletes.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketLists() {
+    return numBucketLists.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeLists() {
+    return numVolumeLists.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyLists() {
+    return numKeyLists.value();
+  }
+
+  @VisibleForTesting
+  public long getNumGetServiceLists() {
+    return numGetServiceLists.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeCreateFails() {
+    return numVolumeCreateFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeUpdateFails() {
+    return numVolumeUpdateFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeInfoFails() {
+    return numVolumeInfoFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeDeleteFails() {
+    return numVolumeDeleteFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeCheckAccessFails() {
+    return numVolumeCheckAccessFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketCreateFails() {
+    return numBucketCreateFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketInfoFails() {
+    return numBucketInfoFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketUpdateFails() {
+    return numBucketUpdateFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketDeleteFails() {
+    return numBucketDeleteFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyAllocates() {
+    return numKeyAllocate.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyAllocateFails() {
+    return numKeyAllocateFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyLookups() {
+    return numKeyLookup.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyLookupFails() {
+    return numKeyLookupFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyRenames() {
+    return numKeyRenames.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyRenameFails() {
+    return numKeyRenameFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyDeletes() {
+    return numKeyDeletes.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyDeletesFails() {
+    return numKeyDeleteFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketListFails() {
+    return numBucketListFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyListFails() {
+    return numKeyListFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeListFails() {
+    return numVolumeListFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyCommits() {
+    return numKeyCommits.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyCommitFails() {
+    return numKeyCommitFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBlockAllocates() {
+    return numAllocateBlockCalls.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBlockAllocateFails() {
+    return numBlockAllocateCallFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumGetServiceListFails() {
+    return numGetServiceListFails.value();
+  }
+
+  public void unRegister() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    ms.unregisterSource(SOURCE_NAME);
+  }
+}
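
A minimal usage sketch, not part of this patch (the class name and printed values are hypothetical), of how a caller or unit test might exercise the OMMetrics counters defined above:

    import org.apache.hadoop.ozone.om.OMMetrics;

    public class OMMetricsUsageSketch {
      public static void main(String[] args) {
        // create() registers the source with the default metrics system.
        OMMetrics metrics = OMMetrics.create();

        // A successful volume create bumps both the request-type counter
        // (numVolumeOps) and the per-op counter (numVolumeCreates).
        metrics.incNumVolumeCreates();
        // A failure bumps only the failure counter.
        metrics.incNumVolumeCreateFails();

        System.out.println("volume creates: " + metrics.getNumVolumeCreates());
        System.out.println("create fails:   " + metrics.getNumVolumeCreateFails());

        // Unregister the source on Ozone Manager shutdown.
        metrics.unRegister();
      }
    }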

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
new file mode 100644
index 0000000..3820aed
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import java.io.IOException;
+import java.util.Properties;
+import java.util.UUID;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.common.Storage;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID;
+import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
+
+/**
+ * OMStorage is responsible for management of the StorageDirectories used by
+ * the Ozone Manager.
+ */
+public class OMStorage extends Storage {
+
+  public static final String STORAGE_DIR = "om";
+  public static final String OM_ID = "omUuid";
+
+  /**
+   * Construct OMStorage.
+   * @throws IOException if any directories are inaccessible.
+   */
+  public OMStorage(OzoneConfiguration conf) throws IOException {
+    super(NodeType.OM, getOzoneMetaDirPath(conf), STORAGE_DIR);
+  }
+
+  public void setScmId(String scmId) throws IOException {
+    if (getState() == StorageState.INITIALIZED) {
+      throw new IOException("OM is already initialized.");
+    } else {
+      getStorageInfo().setProperty(SCM_ID, scmId);
+    }
+  }
+
+  public void setOmId(String omId) throws IOException {
+    if (getState() == StorageState.INITIALIZED) {
+      throw new IOException("OM is already initialized.");
+    } else {
+      getStorageInfo().setProperty(OM_ID, omId);
+    }
+  }
+
+  /**
+   * Retrieves the SCM ID from the version file.
+   * @return SCM_ID
+   */
+  public String getScmId() {
+    return getStorageInfo().getProperty(SCM_ID);
+  }
+
+  /**
+   * Retrieves the OM ID from the version file.
+   * @return OM_ID
+   */
+  public String getOmId() {
+    return getStorageInfo().getProperty(OM_ID);
+  }
+
+  @Override
+  protected Properties getNodeProperties() {
+    String omId = getOmId();
+    if (omId == null) {
+      omId = UUID.randomUUID().toString();
+    }
+    Properties omProperties = new Properties();
+    omProperties.setProperty(OM_ID, omId);
+    return omProperties;
+  }
+}
\ No newline at end of file
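
For context, a minimal sketch (illustration only; it mirrors the omInit() flow that appears later in this patch) of how OMStorage is used to write the OM version file. The id literals are placeholders; in the real flow both come from scmBlockClient.getScmInfo():

    import java.io.IOException;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.common.Storage.StorageState;
    import org.apache.hadoop.ozone.om.OMStorage;

    public class OMStorageInitSketch {
      public static void main(String[] args) throws IOException {
        OzoneConfiguration conf = new OzoneConfiguration();
        OMStorage omStorage = new OMStorage(conf);
        if (omStorage.getState() != StorageState.INITIALIZED) {
          // Placeholder ids; normally retrieved from SCM before init.
          omStorage.setClusterId("placeholder-cluster-id");
          omStorage.setScmId("placeholder-scm-id");
          omStorage.initialize();   // persists the VERSION file under the "om" dir
        }
        System.out.println("omUuid = " + omStorage.getOmId());
      }
    }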

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
new file mode 100644
index 0000000..21d2411
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -0,0 +1,526 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
+
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.utils.BatchOperation;
+import org.apache.hadoop.utils.MetadataKeyFilters;
+import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
+import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
+import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.utils.MetadataStoreBuilder;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
+import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_ID_DELIMINATOR;
+import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_PREFIX;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_DB_CACHE_SIZE_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_DB_CACHE_SIZE_MB;
+import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
+
+/**
+ * Ozone metadata manager interface.
+ */
+public class OmMetadataManagerImpl implements OMMetadataManager {
+
+  private final MetadataStore store;
+  private final ReadWriteLock lock;
+  private final long openKeyExpireThresholdMS;
+
+  public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
+    File metaDir = getOzoneMetaDirPath(conf);
+    final int cacheSize = conf.getInt(OZONE_OM_DB_CACHE_SIZE_MB,
+        OZONE_OM_DB_CACHE_SIZE_DEFAULT);
+    File omDBFile = new File(metaDir.getPath(), OM_DB_NAME);
+    this.store = MetadataStoreBuilder.newBuilder()
+        .setConf(conf)
+        .setDbFile(omDBFile)
+        .setCacheSize(cacheSize * OzoneConsts.MB)
+        .build();
+    this.lock = new ReentrantReadWriteLock();
+    this.openKeyExpireThresholdMS = 1000 * conf.getInt(
+        OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS,
+        OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT);
+  }
+
+  /**
+   * Start metadata manager.
+   */
+  @Override
+  public void start() {
+
+  }
+
+  /**
+   * Stop metadata manager.
+   */
+  @Override
+  public void stop() throws IOException {
+    if (store != null) {
+      store.close();
+    }
+  }
+
+  /**
+   * Get metadata store.
+   * @return store - metadata store.
+   */
+  @VisibleForTesting
+  @Override
+  public MetadataStore getStore() {
+    return store;
+  }
+
+  /**
+   * Given a volume return the corresponding DB key.
+   * @param volume - Volume name
+   */
+  public byte[] getVolumeKey(String volume) {
+    String dbVolumeName = OzoneConsts.OM_VOLUME_PREFIX + volume;
+    return DFSUtil.string2Bytes(dbVolumeName);
+  }
+
+  /**
+   * Given a user return the corresponding DB key.
+   * @param user - User name
+   */
+  public byte[] getUserKey(String user) {
+    String dbUserName = OzoneConsts.OM_USER_PREFIX + user;
+    return DFSUtil.string2Bytes(dbUserName);
+  }
+
+  /**
+   * Given a volume and bucket, return the corresponding DB key.
+   * @param volume - User name
+   * @param bucket - Bucket name
+   */
+  public byte[] getBucketKey(String volume, String bucket) {
+    String bucketKeyString = OzoneConsts.OM_VOLUME_PREFIX + volume
+        + OzoneConsts.OM_BUCKET_PREFIX + bucket;
+    return DFSUtil.string2Bytes(bucketKeyString);
+  }
+
+  /**
+   * Given a volume and an optional bucket prefix, return the DB key prefix
+   * used to scan buckets under that volume.
+   * @param volume - Volume name
+   * @param bucket - Bucket name prefix, may be null or empty
+   * @return DB key prefix string
+   */
+  private String getBucketWithDBPrefix(String volume, String bucket) {
+    StringBuffer sb = new StringBuffer();
+    sb.append(OzoneConsts.OM_VOLUME_PREFIX)
+        .append(volume)
+        .append(OzoneConsts.OM_BUCKET_PREFIX);
+    if (!Strings.isNullOrEmpty(bucket)) {
+      sb.append(bucket);
+    }
+    return sb.toString();
+  }
+
+  @Override
+  public String getKeyWithDBPrefix(String volume, String bucket, String key) {
+    String keyVB = OzoneConsts.OM_KEY_PREFIX + volume
+        + OzoneConsts.OM_KEY_PREFIX + bucket
+        + OzoneConsts.OM_KEY_PREFIX;
+    return Strings.isNullOrEmpty(key) ? keyVB : keyVB + key;
+  }
+
+  @Override
+  public byte[] getDBKeyBytes(String volume, String bucket, String key) {
+    return DFSUtil.string2Bytes(getKeyWithDBPrefix(volume, bucket, key));
+  }
+
+  @Override
+  public byte[] getDeletedKeyName(byte[] keyName) {
+    return DFSUtil.string2Bytes(
+        DELETING_KEY_PREFIX + DFSUtil.bytes2String(keyName));
+  }
+
+  @Override
+  public byte[] getOpenKeyNameBytes(String keyName, int id) {
+    return DFSUtil.string2Bytes(OPEN_KEY_PREFIX + id +
+        OPEN_KEY_ID_DELIMINATOR + keyName);
+  }
+
+  /**
+   * Returns the read lock used on Metadata DB.
+   * @return readLock
+   */
+  @Override
+  public Lock readLock() {
+    return lock.readLock();
+  }
+
+  /**
+   * Returns the write lock used on Metadata DB.
+   * @return writeLock
+   */
+  @Override
+  public Lock writeLock() {
+    return lock.writeLock();
+  }
+
+  /**
+   * Returns the value associated with this key.
+   * @param key - key
+   * @return value
+   */
+  @Override
+  public byte[] get(byte[] key) throws IOException {
+    return store.get(key);
+  }
+
+  /**
+   * Puts a Key into Metadata DB.
+   * @param key   - key
+   * @param value - value
+   */
+  @Override
+  public void put(byte[] key, byte[] value) throws IOException {
+    store.put(key, value);
+  }
+
+  /**
+   * Deletes a Key from Metadata DB.
+   * @param key   - key
+   */
+  public void delete(byte[] key) throws IOException {
+    store.delete(key);
+  }
+
+  @Override
+  public void writeBatch(BatchOperation batch) throws IOException {
+    this.store.writeBatch(batch);
+  }
+
+  /**
+   * Given a volume, check if it is empty, i.e. there are no buckets inside it.
+   * @param volume - Volume name
+   * @return true if the volume is empty
+   */
+  public boolean isVolumeEmpty(String volume) throws IOException {
+    String dbVolumeRootName = OzoneConsts.OM_VOLUME_PREFIX + volume
+        + OzoneConsts.OM_BUCKET_PREFIX;
+    byte[] dbVolumeRootKey = DFSUtil.string2Bytes(dbVolumeRootName);
+    ImmutablePair<byte[], byte[]> volumeRoot =
+        store.peekAround(0, dbVolumeRootKey);
+    if (volumeRoot != null) {
+      return !DFSUtil.bytes2String(volumeRoot.getKey())
+          .startsWith(dbVolumeRootName);
+    }
+    return true;
+  }
+
+  /**
+   * Given a volume/bucket, check if it is empty,
+   * i.e. there are no keys inside it.
+   * @param volume - Volume name
+   * @param bucket - Bucket name
+   * @return true if the bucket is empty
+   */
+  public boolean isBucketEmpty(String volume, String bucket)
+      throws IOException {
+    String keyRootName = getKeyWithDBPrefix(volume, bucket, null);
+    byte[] keyRoot = DFSUtil.string2Bytes(keyRootName);
+    ImmutablePair<byte[], byte[]> firstKey = store.peekAround(0, keyRoot);
+    if (firstKey != null) {
+      return !DFSUtil.bytes2String(firstKey.getKey())
+          .startsWith(keyRootName);
+    }
+    return true;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public List<OmBucketInfo> listBuckets(final String volumeName,
+                                        final String startBucket, final String bucketPrefix,
+                                        final int maxNumOfBuckets) throws IOException {
+    List<OmBucketInfo> result = new ArrayList<>();
+    if (Strings.isNullOrEmpty(volumeName)) {
+      throw new OMException("Volume name is required.",
+          ResultCodes.FAILED_VOLUME_NOT_FOUND);
+    }
+
+    byte[] volumeNameBytes = getVolumeKey(volumeName);
+    if (store.get(volumeNameBytes) == null) {
+      throw new OMException("Volume " + volumeName + " not found.",
+          ResultCodes.FAILED_VOLUME_NOT_FOUND);
+    }
+
+
+    // A bucket starts with /#volume/#bucket_prefix
+    MetadataKeyFilter filter = (preKey, currentKey, nextKey) -> {
+      if (currentKey != null) {
+        String bucketNamePrefix =
+                getBucketWithDBPrefix(volumeName, bucketPrefix);
+        String bucket = DFSUtil.bytes2String(currentKey);
+        return bucket.startsWith(bucketNamePrefix);
+      }
+      return false;
+    };
+
+    List<Map.Entry<byte[], byte[]>> rangeResult;
+    if (!Strings.isNullOrEmpty(startBucket)) {
+      // Since we are excluding start key from the result,
+      // the maxNumOfBuckets is incremented.
+      rangeResult = store.getSequentialRangeKVs(
+          getBucketKey(volumeName, startBucket),
+          maxNumOfBuckets + 1, filter);
+      if (!rangeResult.isEmpty()) {
+        //Remove start key from result.
+        rangeResult.remove(0);
+      }
+    } else {
+      rangeResult = store.getSequentialRangeKVs(null, maxNumOfBuckets, filter);
+    }
+
+    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
+      OmBucketInfo info = OmBucketInfo.getFromProtobuf(
+          BucketInfo.parseFrom(entry.getValue()));
+      result.add(info);
+    }
+    return result;
+  }
+
+  @Override
+  public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
+                                  String startKey, String keyPrefix, int maxKeys) throws IOException {
+    List<OmKeyInfo> result = new ArrayList<>();
+    if (Strings.isNullOrEmpty(volumeName)) {
+      throw new OMException("Volume name is required.",
+          ResultCodes.FAILED_VOLUME_NOT_FOUND);
+    }
+
+    if (Strings.isNullOrEmpty(bucketName)) {
+      throw new OMException("Bucket name is required.",
+          ResultCodes.FAILED_BUCKET_NOT_FOUND);
+    }
+
+    byte[] bucketNameBytes = getBucketKey(volumeName, bucketName);
+    if (store.get(bucketNameBytes) == null) {
+      throw new OMException("Bucket " + bucketName + " not found.",
+          ResultCodes.FAILED_BUCKET_NOT_FOUND);
+    }
+
+    MetadataKeyFilter filter = new KeyPrefixFilter()
+        .addFilter(getKeyWithDBPrefix(volumeName, bucketName, keyPrefix));
+
+    List<Map.Entry<byte[], byte[]>> rangeResult;
+    if (!Strings.isNullOrEmpty(startKey)) {
+      // Since we are excluding the start key from the result,
+      // maxKeys is incremented.
+      rangeResult = store.getSequentialRangeKVs(
+          getDBKeyBytes(volumeName, bucketName, startKey),
+          maxKeys + 1, filter);
+      if (!rangeResult.isEmpty()) {
+        //Remove start key from result.
+        rangeResult.remove(0);
+      }
+    } else {
+      rangeResult = store.getSequentialRangeKVs(null, maxKeys, filter);
+    }
+
+    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
+      OmKeyInfo info = OmKeyInfo.getFromProtobuf(
+          KeyInfo.parseFrom(entry.getValue()));
+      result.add(info);
+    }
+    return result;
+  }
+
+  @Override
+  public List<OmVolumeArgs> listVolumes(String userName,
+                                        String prefix, String startKey, int maxKeys) throws IOException {
+    List<OmVolumeArgs> result = Lists.newArrayList();
+    VolumeList volumes;
+    if (Strings.isNullOrEmpty(userName)) {
+      volumes = getAllVolumes();
+    } else {
+      volumes = getVolumesByUser(userName);
+    }
+
+    if (volumes == null || volumes.getVolumeNamesCount() == 0) {
+      return result;
+    }
+
+    boolean startKeyFound = Strings.isNullOrEmpty(startKey);
+    for (String volumeName : volumes.getVolumeNamesList()) {
+      if (!Strings.isNullOrEmpty(prefix)) {
+        if (!volumeName.startsWith(prefix)) {
+          continue;
+        }
+      }
+
+      if (!startKeyFound && volumeName.equals(startKey)) {
+        startKeyFound = true;
+        continue;
+      }
+      if (startKeyFound && result.size() < maxKeys) {
+        byte[] volumeInfo = store.get(this.getVolumeKey(volumeName));
+        if (volumeInfo == null) {
+          // Could not get volume info by given volume name,
+          // since the volume name is loaded from db,
+          // this probably means om db is corrupted or some entries are
+          // accidentally removed.
+          throw new OMException("Volume info not found for " + volumeName,
+              ResultCodes.FAILED_VOLUME_NOT_FOUND);
+        }
+        VolumeInfo info = VolumeInfo.parseFrom(volumeInfo);
+        OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(info);
+        result.add(volumeArgs);
+      }
+    }
+
+    return result;
+  }
+
+  private VolumeList getVolumesByUser(String userName)
+      throws OMException {
+    return getVolumesByUser(getUserKey(userName));
+  }
+
+  private VolumeList getVolumesByUser(byte[] userNameKey)
+      throws OMException {
+    VolumeList volumes = null;
+    try {
+      byte[] volumesInBytes = store.get(userNameKey);
+      if (volumesInBytes == null) {
+        // No volume found for this user, return an empty list
+        return VolumeList.newBuilder().build();
+      }
+      volumes = VolumeList.parseFrom(volumesInBytes);
+    } catch (IOException e) {
+      throw new OMException("Unable to get volumes info by the given user, "
+          + "metadata might be corrupted", e,
+          ResultCodes.FAILED_METADATA_ERROR);
+    }
+    return volumes;
+  }
+
+  private VolumeList getAllVolumes() throws IOException {
+    // Scan all users in database
+    KeyPrefixFilter filter =
+        new KeyPrefixFilter().addFilter(OzoneConsts.OM_USER_PREFIX);
+    // We are not expecting a huge number of users per cluster,
+    // it should be fine to scan all users in db and return us a
+    // list of volume names in string per user.
+    List<Map.Entry<byte[], byte[]>> rangeKVs = store
+        .getSequentialRangeKVs(null, Integer.MAX_VALUE, filter);
+
+    VolumeList.Builder builder = VolumeList.newBuilder();
+    for (Map.Entry<byte[], byte[]> entry : rangeKVs) {
+      VolumeList volumes = this.getVolumesByUser(entry.getKey());
+      builder.addAllVolumeNames(volumes.getVolumeNamesList());
+    }
+
+    return builder.build();
+  }
+
+  @Override
+  public List<BlockGroup> getPendingDeletionKeys(final int count)
+      throws IOException {
+    List<BlockGroup> keyBlocksList = Lists.newArrayList();
+    List<Map.Entry<byte[], byte[]>> rangeResult =
+        store.getRangeKVs(null, count,
+            MetadataKeyFilters.getDeletingKeyFilter());
+    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
+      OmKeyInfo info =
+          OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(entry.getValue()));
+      // Get block keys as a list.
+      OmKeyLocationInfoGroup latest = info.getLatestVersionLocations();
+      if (latest == null) {
+        return Collections.emptyList();
+      }
+      List<BlockID> item = latest.getLocationList().stream()
+          .map(b->new BlockID(b.getContainerID(), b.getLocalID()))
+          .collect(Collectors.toList());
+      BlockGroup keyBlocks = BlockGroup.newBuilder()
+          .setKeyName(DFSUtil.bytes2String(entry.getKey()))
+          .addAllBlockIDs(item)
+          .build();
+      keyBlocksList.add(keyBlocks);
+    }
+    return keyBlocksList;
+  }
+
+  @Override
+  public List<BlockGroup> getExpiredOpenKeys() throws IOException {
+    List<BlockGroup> keyBlocksList = Lists.newArrayList();
+    long now = Time.now();
+    final MetadataKeyFilter openKeyFilter =
+        new KeyPrefixFilter().addFilter(OPEN_KEY_PREFIX);
+    List<Map.Entry<byte[], byte[]>> rangeResult =
+        store.getSequentialRangeKVs(null, Integer.MAX_VALUE,
+            openKeyFilter);
+    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
+      OmKeyInfo info =
+          OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(entry.getValue()));
+      long lastModify = info.getModificationTime();
+      if (now - lastModify < this.openKeyExpireThresholdMS) {
+        // Modified recently; consider the key still active, not hanging.
+        continue;
+      }
+      // Get block keys as a list.
+      List<BlockID> item = info.getLatestVersionLocations()
+          .getBlocksLatestVersionOnly().stream()
+          .map(b->new BlockID(b.getContainerID(), b.getLocalID()))
+          .collect(Collectors.toList());
+      BlockGroup keyBlocks = BlockGroup.newBuilder()
+          .setKeyName(DFSUtil.bytes2String(entry.getKey()))
+          .addAllBlockIDs(item)
+          .build();
+      keyBlocksList.add(keyBlocks);
+    }
+    return keyBlocksList;
+  }
+}
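
A minimal sketch (illustration only; the volume/bucket/key names are placeholders) of the flat, prefix-based key layout behind OmMetadataManagerImpl. All three object types share one MetadataStore; the OzoneConsts prefixes built into the helpers above keep their key ranges disjoint:

    import java.io.IOException;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;

    public class OmMetadataKeyLayoutSketch {
      public static void main(String[] args) throws IOException {
        OzoneConfiguration conf = new OzoneConfiguration();
        OmMetadataManagerImpl metadataManager = new OmMetadataManagerImpl(conf);
        try {
          // Each helper prepends the prefix for its object type.
          byte[] volumeKey = metadataManager.getVolumeKey("vol1");
          byte[] bucketKey = metadataManager.getBucketKey("vol1", "bucket1");
          byte[] keyKey = metadataManager.getDBKeyBytes("vol1", "bucket1", "key1");

          System.out.println("volume key: " + DFSUtil.bytes2String(volumeKey));
          System.out.println("bucket key: " + DFSUtil.bytes2String(bucketKey));
          System.out.println("key key:    " + DFSUtil.bytes2String(keyKey));
        } finally {
          metadataManager.stop();   // closes the underlying store
        }
      }
    }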

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java
new file mode 100644
index 0000000..8d94f5a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.utils.BackgroundService;
+import org.apache.hadoop.utils.BackgroundTask;
+import org.apache.hadoop.utils.BackgroundTaskQueue;
+import org.apache.hadoop.utils.BackgroundTaskResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This is the background service that deletes hanging open keys.
+ * It periodically scans the OM metadata for keys with the prefix
+ * "#open#", asks SCM to delete the corresponding block metadata,
+ * and, if SCM reports success for a key, cleans up that key entry.
+ */
+public class OpenKeyCleanupService extends BackgroundService {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OpenKeyCleanupService.class);
+
+  private final static int OPEN_KEY_DELETING_CORE_POOL_SIZE = 2;
+
+  private final KeyManager keyManager;
+  private final ScmBlockLocationProtocol scmClient;
+
+  public OpenKeyCleanupService(ScmBlockLocationProtocol scmClient,
+      KeyManager keyManager, int serviceInterval,
+      long serviceTimeout) {
+    super("OpenKeyCleanupService", serviceInterval, TimeUnit.SECONDS,
+        OPEN_KEY_DELETING_CORE_POOL_SIZE, serviceTimeout);
+    this.keyManager = keyManager;
+    this.scmClient = scmClient;
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    queue.add(new OpenKeyDeletingTask());
+    return queue;
+  }
+
+  private class OpenKeyDeletingTask
+      implements BackgroundTask<BackgroundTaskResult> {
+
+    @Override
+    public int getPriority() {
+      return 0;
+    }
+
+    @Override
+    public BackgroundTaskResult call() throws Exception {
+      try {
+        List<BlockGroup> keyBlocksList = keyManager.getExpiredOpenKeys();
+        if (keyBlocksList.size() > 0) {
+          int toDeleteSize = keyBlocksList.size();
+          LOG.debug("Found {} to-delete open keys in OM", toDeleteSize);
+          List<DeleteBlockGroupResult> results =
+              scmClient.deleteKeyBlocks(keyBlocksList);
+          int deletedSize = 0;
+          for (DeleteBlockGroupResult result : results) {
+            if (result.isSuccess()) {
+              try {
+                keyManager.deleteExpiredOpenKey(result.getObjectKey());
+                LOG.debug("Key {} deleted from OM DB", result.getObjectKey());
+                deletedSize += 1;
+              } catch (IOException e) {
+                LOG.warn("Failed to delete hanging-open key {}",
+                    result.getObjectKey(), e);
+              }
+            } else {
+              LOG.warn("Deleting open Key {} failed because some of the blocks"
+                      + " were failed to delete, failed blocks: {}",
+                  result.getObjectKey(),
+                  StringUtils.join(",", result.getFailedBlocks()));
+            }
+          }
+          LOG.info("Found {} expired open key entries, successfully " +
+              "cleaned up {} entries", toDeleteSize, deletedSize);
+          return results::size;
+        } else {
+          LOG.debug("No hanging open key found in OM");
+        }
+      } catch (IOException e) {
+        LOG.error("Unable to get hanging open keys, retry in"
+            + " next interval", e);
+      }
+      return BackgroundTaskResult.EmptyTaskResult.newResult();
+    }
+  }
+}
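
A minimal wiring sketch (illustration only, not part of this patch): the interval and timeout literals are placeholders, scmClient and keyManager stand for the OM's own instances, and the start() lifecycle method is assumed to be provided by the BackgroundService base class:

    import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
    import org.apache.hadoop.ozone.om.KeyManager;
    import org.apache.hadoop.ozone.om.OpenKeyCleanupService;

    public class OpenKeyCleanupWiringSketch {
      // Hypothetical helper showing how the service would be constructed.
      static OpenKeyCleanupService startCleanup(ScmBlockLocationProtocol scmClient,
          KeyManager keyManager) {
        OpenKeyCleanupService service =
            new OpenKeyCleanupService(scmClient, keyManager,
                300 /* serviceInterval, seconds (placeholder) */,
                60000L /* serviceTimeout (placeholder) */);
        service.start();   // assumed BackgroundService lifecycle; schedules OpenKeyDeletingTask
        return service;
      }
    }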

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
new file mode 100644
index 0000000..71fa921
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -0,0 +1,911 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.BlockingService;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
+import org.apache.hadoop.ozone.common.Storage.StorageState;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
+import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .ServicePort;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.protocolPB
+    .ScmBlockLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
+import org.apache.hadoop.hdds.scm.protocolPB
+    .StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.util.StringUtils;
+
+import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
+import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
+import static org.apache.hadoop.hdds.HddsUtils.isHddsEnabled;
+import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
+import static org.apache.hadoop.hdds.server.ServerUtils
+    .updateRPCListenAddress;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.management.ObjectName;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_HANDLER_COUNT_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_HANDLER_COUNT_KEY;
+import static org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneManagerService
+    .newReflectiveBlockingService;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
+    .NodeState.HEALTHY;
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
+/**
+ * Ozone Manager is the metadata manager of ozone.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
+public final class OzoneManager extends ServiceRuntimeInfoImpl
+    implements OzoneManagerProtocol, OMMXBean {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OzoneManager.class);
+
+  private static final String USAGE =
+      "Usage: \n ozone om [genericOptions] " + "[ "
+          + StartupOption.CREATEOBJECTSTORE.getName() + " ]\n " + "ozone om [ "
+          + StartupOption.HELP.getName() + " ]\n";
+
+  /** Startup options. */
+  public enum StartupOption {
+    CREATEOBJECTSTORE("-createObjectStore"),
+    HELP("-help"),
+    REGULAR("-regular");
+
+    private final String name;
+
+    StartupOption(String arg) {
+      this.name = arg;
+    }
+
+    public String getName() {
+      return name;
+    }
+
+    public static StartupOption parse(String value) {
+      for (StartupOption option : StartupOption.values()) {
+        if (option.name.equalsIgnoreCase(value)) {
+          return option;
+        }
+      }
+      return null;
+    }
+  }
+
+  private final OzoneConfiguration configuration;
+  private final RPC.Server omRpcServer;
+  private final InetSocketAddress omRpcAddress;
+  private final OMMetadataManager metadataManager;
+  private final VolumeManager volumeManager;
+  private final BucketManager bucketManager;
+  private final KeyManager keyManager;
+  private final OMMetrics metrics;
+  private final OzoneManagerHttpServer httpServer;
+  private final OMStorage omStorage;
+  private final ScmBlockLocationProtocol scmBlockClient;
+  private final StorageContainerLocationProtocol scmContainerClient;
+  private ObjectName omInfoBeanName;
+
+  private OzoneManager(OzoneConfiguration conf) throws IOException {
+    Preconditions.checkNotNull(conf);
+    configuration = conf;
+    omStorage = new OMStorage(conf);
+    scmBlockClient = getScmBlockClient(configuration);
+    scmContainerClient = getScmContainerClient(configuration);
+    if (omStorage.getState() != StorageState.INITIALIZED) {
+      throw new OMException("OM not initialized.",
+          ResultCodes.OM_NOT_INITIALIZED);
+    }
+
+    // verifies that the SCM info in the OM Version file is correct.
+    ScmInfo scmInfo = scmBlockClient.getScmInfo();
+    if (!(scmInfo.getClusterId().equals(omStorage.getClusterID()) && scmInfo
+        .getScmId().equals(omStorage.getScmId()))) {
+      throw new OMException("SCM version info mismatch.",
+          ResultCodes.SCM_VERSION_MISMATCH_ERROR);
+    }
+    final int handlerCount = conf.getInt(OZONE_OM_HANDLER_COUNT_KEY,
+        OZONE_OM_HANDLER_COUNT_DEFAULT);
+
+    RPC.setProtocolEngine(configuration, OzoneManagerProtocolPB.class,
+        ProtobufRpcEngine.class);
+
+    BlockingService omService = newReflectiveBlockingService(
+        new OzoneManagerProtocolServerSideTranslatorPB(this));
+    final InetSocketAddress omNodeRpcAddr =
+        getOmAddress(configuration);
+    omRpcServer = startRpcServer(configuration, omNodeRpcAddr,
+        OzoneManagerProtocolPB.class, omService,
+        handlerCount);
+    omRpcAddress = updateRPCListenAddress(configuration,
+        OZONE_OM_ADDRESS_KEY, omNodeRpcAddr, omRpcServer);
+    metadataManager = new OmMetadataManagerImpl(configuration);
+    volumeManager = new VolumeManagerImpl(metadataManager, configuration);
+    bucketManager = new BucketManagerImpl(metadataManager);
+    metrics = OMMetrics.create();
+    keyManager =
+        new KeyManagerImpl(scmBlockClient, metadataManager, configuration,
+            omStorage.getOmId());
+    httpServer = new OzoneManagerHttpServer(configuration, this);
+  }
+
+  /**
+   * Creates an SCM block client, used by putKey() and getKey().
+   *
+   * @return {@link ScmBlockLocationProtocol}
+   * @throws IOException
+   */
+  private static ScmBlockLocationProtocol getScmBlockClient(
+      OzoneConfiguration conf) throws IOException {
+    RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
+        ProtobufRpcEngine.class);
+    long scmVersion =
+        RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
+    InetSocketAddress scmBlockAddress =
+        getScmAddressForBlockClients(conf);
+    ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient =
+        new ScmBlockLocationProtocolClientSideTranslatorPB(
+            RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
+                scmBlockAddress, UserGroupInformation.getCurrentUser(), conf,
+                NetUtils.getDefaultSocketFactory(conf),
+                Client.getRpcTimeout(conf)));
+    return scmBlockLocationClient;
+  }
+
+  /**
+   * Returns an SCM container client.
+   *
+   * @return {@link StorageContainerLocationProtocol}
+   * @throws IOException
+   */
+  private static StorageContainerLocationProtocol getScmContainerClient(
+      OzoneConfiguration conf) throws IOException {
+    RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
+        ProtobufRpcEngine.class);
+    long scmVersion =
+        RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
+    InetSocketAddress scmAddr = getScmAddressForClients(
+        conf);
+    StorageContainerLocationProtocolClientSideTranslatorPB scmContainerClient =
+        new StorageContainerLocationProtocolClientSideTranslatorPB(
+            RPC.getProxy(StorageContainerLocationProtocolPB.class, scmVersion,
+                scmAddr, UserGroupInformation.getCurrentUser(), conf,
+                NetUtils.getDefaultSocketFactory(conf),
+                Client.getRpcTimeout(conf)));
+    return scmContainerClient;
+  }
+
+  @VisibleForTesting
+  public KeyManager getKeyManager() {
+    return keyManager;
+  }
+
+  @VisibleForTesting
+  public ScmInfo getScmInfo() throws IOException {
+    return scmBlockClient.getScmInfo();
+  }
+
+  @VisibleForTesting
+  public OMStorage getOmStorage() {
+    return omStorage;
+  }
+  /**
+   * Starts an RPC server, if configured.
+   *
+   * @param conf configuration
+   * @param addr configured address of RPC server
+   * @param protocol RPC protocol provided by RPC server
+   * @param instance RPC protocol implementation instance
+   * @param handlerCount RPC server handler count
+   *
+   * @return RPC server
+   * @throws IOException if there is an I/O error while creating RPC server
+   */
+  private static RPC.Server startRpcServer(OzoneConfiguration conf,
+      InetSocketAddress addr, Class<?> protocol, BlockingService instance,
+      int handlerCount) throws IOException {
+    RPC.Server rpcServer = new RPC.Builder(conf)
+        .setProtocol(protocol)
+        .setInstance(instance)
+        .setBindAddress(addr.getHostString())
+        .setPort(addr.getPort())
+        .setNumHandlers(handlerCount)
+        .setVerbose(false)
+        .setSecretManager(null)
+        .build();
+
+    DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);
+    return rpcServer;
+  }
+
+  /**
+   * Get metadata manager.
+   * @return metadata manager.
+   */
+  public OMMetadataManager getMetadataManager() {
+    return metadataManager;
+  }
+
+  public OMMetrics getMetrics() {
+    return metrics;
+  }
+
+  /**
+   * Main entry point for starting OzoneManager.
+   *
+   * @param argv arguments
+   * @throws IOException if startup fails due to I/O error
+   */
+  public static void main(String[] argv) throws IOException {
+    if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
+      System.exit(0);
+    }
+    try {
+      OzoneConfiguration conf = new OzoneConfiguration();
+      GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
+      if (!hParser.isParseSuccessful()) {
+        System.err.println("USAGE: " + USAGE + " \n");
+        hParser.printGenericCommandUsage(System.err);
+        System.exit(1);
+      }
+      StringUtils.startupShutdownMessage(OzoneManager.class, argv, LOG);
+      OzoneManager om = createOm(hParser.getRemainingArgs(), conf);
+      if (om != null) {
+        om.start();
+        om.join();
+      }
+    } catch (Throwable t) {
+      LOG.error("Failed to start the OzoneManager.", t);
+      terminate(1, t);
+    }
+  }
+
+  private static void printUsage(PrintStream out) {
+    out.println(USAGE + "\n");
+  }
+
+  /**
+   * Constructs OM instance based on command line arguments.
+   * @param argv Command line arguments
+   * @param conf OzoneConfiguration
+   * @return OM instance
+   * @throws IOException in case OM instance creation fails.
+   */
+
+  public static OzoneManager createOm(String[] argv,
+                                      OzoneConfiguration conf) throws IOException {
+    if (!isHddsEnabled(conf)) {
+      System.err.println("OM cannot be started in secure mode or when " +
+          OZONE_ENABLED + " is set to false");
+      System.exit(1);
+    }
+    StartupOption startOpt = parseArguments(argv);
+    if (startOpt == null) {
+      printUsage(System.err);
+      terminate(1);
+      return null;
+    }
+    switch (startOpt) {
+    case CREATEOBJECTSTORE:
+      terminate(omInit(conf) ? 0 : 1);
+      return null;
+    case HELP:
+      printUsage(System.err);
+      terminate(0);
+      return null;
+    default:
+      return new OzoneManager(conf);
+    }
+  }
+
+  /**
+   * Initializes the OM instance.
+   * @param conf OzoneConfiguration
+   * @return true if OM initialization succeeds, false otherwise
+   * @throws IOException in case ozone metadata directory path is not accessible
+   */
+
+  private static boolean omInit(OzoneConfiguration conf) throws IOException {
+    OMStorage omStorage = new OMStorage(conf);
+    StorageState state = omStorage.getState();
+    if (state != StorageState.INITIALIZED) {
+      try {
+        ScmBlockLocationProtocol scmBlockClient = getScmBlockClient(conf);
+        ScmInfo scmInfo = scmBlockClient.getScmInfo();
+        String clusterId = scmInfo.getClusterId();
+        String scmId = scmInfo.getScmId();
+        if (clusterId == null || clusterId.isEmpty()) {
+          throw new IOException("Invalid Cluster ID");
+        }
+        if (scmId == null || scmId.isEmpty()) {
+          throw new IOException("Invalid SCM ID");
+        }
+        omStorage.setClusterId(clusterId);
+        omStorage.setScmId(scmId);
+        omStorage.initialize();
+        System.out.println(
+            "OM initialization succeeded.Current cluster id for sd="
+                + omStorage.getStorageDir() + ";cid=" + omStorage
+                .getClusterID());
+        return true;
+      } catch (IOException ioe) {
+        LOG.error("Could not initialize OM version file", ioe);
+        return false;
+      }
+    } else {
+      System.out.println(
+          "OM already initialized.Reusing existing cluster id for sd="
+              + omStorage.getStorageDir() + ";cid=" + omStorage
+              .getClusterID());
+      return true;
+    }
+  }
+
+  /**
+   * Parses the command line options for OM initialization.
+   * @param args command line arguments
+   * @return StartupOption if options are valid, null otherwise
+   */
+  private static StartupOption parseArguments(String[] args) {
+    if (args == null || args.length == 0) {
+      return StartupOption.REGULAR;
+    } else if (args.length == 1) {
+      return StartupOption.parse(args[0]);
+    }
+    return null;
+  }
+
+  /**
+   * Builds a message for logging startup information about an RPC server.
+   *
+   * @param description RPC server description
+   * @param addr RPC server listening address
+   * @return server startup message
+   */
+  private static String buildRpcServerStartMessage(String description,
+      InetSocketAddress addr) {
+    return addr != null ? String.format("%s is listening at %s",
+        description, addr.toString()) :
+        String.format("%s not started", description);
+  }
+
+  /**
+   * Start service.
+   */
+  public void start() throws IOException {
+    LOG.info(buildRpcServerStartMessage("OzoneManager RPC server",
+        omRpcAddress));
+    DefaultMetricsSystem.initialize("OzoneManager");
+    metadataManager.start();
+    keyManager.start();
+    omRpcServer.start();
+    httpServer.start();
+    registerMXBean();
+    setStartTime();
+  }
+
+  /**
+   * Stop service.
+   */
+  public void stop() {
+    try {
+      metadataManager.stop();
+      omRpcServer.stop();
+      keyManager.stop();
+      httpServer.stop();
+      metrics.unRegister();
+      unregisterMXBean();
+    } catch (Exception e) {
+      LOG.error("OzoneManager stop failed.", e);
+    }
+  }
+
+  /**
+   * Wait until service has completed shutdown.
+   */
+  public void join() {
+    try {
+      omRpcServer.join();
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      LOG.info("Interrupted during OzoneManager join.", e);
+    }
+  }
+
+  /**
+   * Creates a volume.
+   *
+   * @param args - Arguments to create Volume.
+   * @throws IOException
+   */
+  @Override
+  public void createVolume(OmVolumeArgs args) throws IOException {
+    try {
+      metrics.incNumVolumeCreates();
+      volumeManager.createVolume(args);
+    } catch (Exception ex) {
+      metrics.incNumVolumeCreateFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Changes the owner of a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param owner - Name of the owner.
+   * @throws IOException
+   */
+  @Override
+  public void setOwner(String volume, String owner) throws IOException {
+    try {
+      metrics.incNumVolumeUpdates();
+      volumeManager.setOwner(volume, owner);
+    } catch (Exception ex) {
+      metrics.incNumVolumeUpdateFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Changes the Quota on a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param quota - Quota in bytes.
+   * @throws IOException
+   */
+  @Override
+  public void setQuota(String volume, long quota) throws IOException {
+    try {
+      metrics.incNumVolumeUpdates();
+      volumeManager.setQuota(volume, quota);
+    } catch (Exception ex) {
+      metrics.incNumVolumeUpdateFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Checks if the specified user can access this volume.
+   *
+   * @param volume - volume
+   * @param userAcl - user acls which needs to be checked for access
+   * @return true if the user has required access for the volume,
+   *         false otherwise
+   * @throws IOException
+   */
+  @Override
+  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
+      throws IOException {
+    try {
+      metrics.incNumVolumeCheckAccesses();
+      return volumeManager.checkVolumeAccess(volume, userAcl);
+    } catch (Exception ex) {
+      metrics.incNumVolumeCheckAccessFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Gets the volume information.
+   *
+   * @param volume - Volume name.
+   * @return OmVolumeArgs, or an exception is thrown.
+   * @throws IOException
+   */
+  @Override
+  public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
+    try {
+      metrics.incNumVolumeInfos();
+      return volumeManager.getVolumeInfo(volume);
+    } catch (Exception ex) {
+      metrics.incNumVolumeInfoFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Deletes an existing empty volume.
+   *
+   * @param volume - Name of the volume.
+   * @throws IOException
+   */
+  @Override
+  public void deleteVolume(String volume) throws IOException {
+    try {
+      metrics.incNumVolumeDeletes();
+      volumeManager.deleteVolume(volume);
+    } catch (Exception ex) {
+      metrics.incNumVolumeDeleteFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Lists volumes owned by a specific user.
+   *
+   * @param userName - user name
+   * @param prefix - Filter prefix -- Return only entries that match this.
+   * @param prevKey - Previous key -- listing starts from the key after
+   * prevKey; prevKey itself is excluded from the result.
+   * @param maxKeys - Max number of keys to return.
+   * @return List of Volumes.
+   * @throws IOException
+   */
+  @Override
+  public List<OmVolumeArgs> listVolumeByUser(String userName, String prefix,
+                                             String prevKey, int maxKeys) throws IOException {
+    try {
+      metrics.incNumVolumeLists();
+      return volumeManager.listVolumes(userName, prefix, prevKey, maxKeys);
+    } catch (Exception ex) {
+      metrics.incNumVolumeListFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Lists all volumes in the cluster.
+   *
+   * @param prefix - Filter prefix -- Return only entries that match this.
+   * @param prevKey - Previous key -- listing starts from the key after
+   * prevKey; prevKey itself is excluded from the result.
+   * @param maxKeys - Max number of keys to return.
+   * @return List of Volumes.
+   * @throws IOException
+   */
+  @Override
+  public List<OmVolumeArgs> listAllVolumes(String prefix, String prevKey,
+      int maxKeys) throws IOException {
+    try {
+      metrics.incNumVolumeLists();
+      return volumeManager.listVolumes(null, prefix, prevKey, maxKeys);
+    } catch (Exception ex) {
+      metrics.incNumVolumeListFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Creates a bucket.
+   *
+   * @param bucketInfo - BucketInfo to create bucket.
+   * @throws IOException
+   */
+  @Override
+  public void createBucket(OmBucketInfo bucketInfo) throws IOException {
+    try {
+      metrics.incNumBucketCreates();
+      bucketManager.createBucket(bucketInfo);
+    } catch (Exception ex) {
+      metrics.incNumBucketCreateFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public List<OmBucketInfo> listBuckets(String volumeName,
+                                        String startKey, String prefix, int maxNumOfBuckets)
+      throws IOException {
+    try {
+      metrics.incNumBucketLists();
+      return bucketManager.listBuckets(volumeName,
+          startKey, prefix, maxNumOfBuckets);
+    } catch (IOException ex) {
+      metrics.incNumBucketListFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Gets the bucket information.
+   *
+   * @param volume - Volume name.
+   * @param bucket - Bucket name.
+   * @return OmBucketInfo or exception is thrown.
+   * @throws IOException
+   */
+  @Override
+  public OmBucketInfo getBucketInfo(String volume, String bucket)
+      throws IOException {
+    try {
+      metrics.incNumBucketInfos();
+      return bucketManager.getBucketInfo(volume, bucket);
+    } catch (Exception ex) {
+      metrics.incNumBucketInfoFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Allocate a key.
+   *
+   * @param args - attributes of the key.
+   * @return OmKeyInfo - the info about the allocated key.
+   * @throws IOException
+   */
+  @Override
+  public OpenKeySession openKey(OmKeyArgs args) throws IOException {
+    try {
+      metrics.incNumKeyAllocates();
+      return keyManager.openKey(args);
+    } catch (Exception ex) {
+      metrics.incNumKeyAllocateFails();
+      throw ex;
+    }
+  }
+
+  @Override
+  public void commitKey(OmKeyArgs args, int clientID)
+      throws IOException {
+    try {
+      metrics.incNumKeyCommits();
+      keyManager.commitKey(args, clientID);
+    } catch (Exception ex) {
+      metrics.incNumKeyCommitFails();
+      throw ex;
+    }
+  }
+
+  @Override
+  public OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID)
+      throws IOException {
+    try {
+      metrics.incNumBlockAllocateCalls();
+      return keyManager.allocateBlock(args, clientID);
+    } catch (Exception ex) {
+      metrics.incNumBlockAllocateCallFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Lookup a key.
+   *
+   * @param args - attributes of the key.
+   * @return OmKeyInfo - the info about the requested key.
+   * @throws IOException
+   */
+  @Override
+  public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException {
+    try {
+      metrics.incNumKeyLookups();
+      return keyManager.lookupKey(args);
+    } catch (Exception ex) {
+      metrics.incNumKeyLookupFails();
+      throw ex;
+    }
+  }
+
+  @Override
+  public void renameKey(OmKeyArgs args, String toKeyName) throws IOException {
+    try {
+      metrics.incNumKeyRenames();
+      keyManager.renameKey(args, toKeyName);
+    } catch (IOException e) {
+      metrics.incNumKeyRenameFails();
+      throw e;
+    }
+  }
+
+  /**
+   * Deletes an existing key.
+   *
+   * @param args - attributes of the key.
+   * @throws IOException
+   */
+  @Override
+  public void deleteKey(OmKeyArgs args) throws IOException {
+    try {
+      metrics.incNumKeyDeletes();
+      keyManager.deleteKey(args);
+    } catch (Exception ex) {
+      metrics.incNumKeyDeleteFails();
+      throw ex;
+    }
+  }
+
+  @Override
+  public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
+                                  String startKey, String keyPrefix, int maxKeys) throws IOException {
+    try {
+      metrics.incNumKeyLists();
+      return keyManager.listKeys(volumeName, bucketName,
+          startKey, keyPrefix, maxKeys);
+    } catch (IOException ex) {
+      metrics.incNumKeyListFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Sets bucket property from args.
+   * @param args - BucketArgs.
+   * @throws IOException
+   */
+  @Override
+  public void setBucketProperty(OmBucketArgs args)
+      throws IOException {
+    try {
+      metrics.incNumBucketUpdates();
+      bucketManager.setBucketProperty(args);
+    } catch (Exception ex) {
+      metrics.incNumBucketUpdateFails();
+      throw ex;
+    }
+  }
+
+
+  /**
+   * Deletes an existing empty bucket from volume.
+   * @param volume - Name of the volume.
+   * @param bucket - Name of the bucket.
+   * @throws IOException
+   */
+  public void deleteBucket(String volume, String bucket) throws IOException {
+    try {
+      metrics.incNumBucketDeletes();
+      bucketManager.deleteBucket(volume, bucket);
+    } catch (Exception ex) {
+      metrics.incNumBucketDeleteFails();
+      throw ex;
+    }
+  }
+
+  private void registerMXBean() {
+    Map<String, String> jmxProperties = new HashMap<String, String>();
+    jmxProperties.put("component", "ServerRuntime");
+    this.omInfoBeanName =
+        MBeans.register("OzoneManager",
+            "OzoneManagerInfo",
+            jmxProperties,
+            this);
+  }
+
+  private void unregisterMXBean() {
+    if (this.omInfoBeanName != null) {
+      MBeans.unregister(this.omInfoBeanName);
+      this.omInfoBeanName = null;
+    }
+  }
+
+  @Override
+  public String getRpcPort() {
+    return "" + omRpcAddress.getPort();
+  }
+
+  @VisibleForTesting
+  public OzoneManagerHttpServer getHttpServer() {
+    return httpServer;
+  }
+
+  @Override
+  public List<ServiceInfo> getServiceList() throws IOException {
+    // When we implement multi-home this call has to be handled properly.
+    List<ServiceInfo> services = new ArrayList<>();
+    ServiceInfo.Builder omServiceInfoBuilder = ServiceInfo.newBuilder()
+        .setNodeType(HddsProtos.NodeType.OM)
+        .setHostname(omRpcAddress.getHostName())
+        .addServicePort(ServicePort.newBuilder()
+                .setType(ServicePort.Type.RPC)
+                .setValue(omRpcAddress.getPort())
+            .build());
+    if (httpServer.getHttpAddress() != null) {
+      omServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
+          .setType(ServicePort.Type.HTTP)
+          .setValue(httpServer.getHttpAddress().getPort())
+          .build());
+    }
+    if (httpServer.getHttpsAddress() != null) {
+      omServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
+          .setType(ServicePort.Type.HTTPS)
+          .setValue(httpServer.getHttpsAddress().getPort())
+          .build());
+    }
+    services.add(omServiceInfoBuilder.build());
+
+    // For client we have to return SCM with container protocol port,
+    // not block protocol.
+    InetSocketAddress scmAddr = getScmAddressForClients(
+        configuration);
+    ServiceInfo.Builder scmServiceInfoBuilder = ServiceInfo.newBuilder()
+        .setNodeType(HddsProtos.NodeType.SCM)
+        .setHostname(scmAddr.getHostName())
+        .addServicePort(ServicePort.newBuilder()
+            .setType(ServicePort.Type.RPC)
+            .setValue(scmAddr.getPort()).build());
+    services.add(scmServiceInfoBuilder.build());
+
+    List<HddsProtos.Node> nodes = scmContainerClient.queryNode(HEALTHY,
+        HddsProtos.QueryScope.CLUSTER, "");
+
+    for (HddsProtos.Node node : nodes) {
+      HddsProtos.DatanodeDetailsProto datanode = node.getNodeID();
+
+      ServiceInfo.Builder dnServiceInfoBuilder = ServiceInfo.newBuilder()
+          .setNodeType(HddsProtos.NodeType.DATANODE)
+          .setHostname(datanode.getHostName());
+
+      dnServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
+          .setType(ServicePort.Type.HTTP)
+          .setValue(DatanodeDetails.getFromProtoBuf(datanode)
+              .getPort(DatanodeDetails.Port.Name.REST).getValue())
+          .build());
+
+      services.add(dnServiceInfoBuilder.build());
+    }
+
+    metrics.incNumGetServiceLists();
+    // For now there is no exception that can happen in this call,
+    // so failure metrics are not handled. If we ever need to handle
+    // exceptions in this method, we should also incorporate
+    // metrics.incNumGetServiceListFails().
+    return services;
+  }
+}
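
A minimal sketch of how the lifecycle methods above are intended to be driven, assuming an OzoneManager instance has already been constructed by an entry point outside this hunk:

    // Sketch only: construction of the OzoneManager is not shown in this diff.
    import java.io.IOException;
    import org.apache.hadoop.ozone.om.OzoneManager;

    public final class OmLifecycleSketch {
      static void runUntilShutdown(OzoneManager om) throws IOException {
        om.start();    // metadata manager, key manager, RPC server, HTTP server, JMX
        try {
          om.join();   // blocks until the RPC server terminates
        } finally {
          om.stop();   // stops the managers and unregisters metrics and the MXBean
        }
      }
    }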

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
new file mode 100644
index 0000000..bd6ab69
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.hdds.server.BaseHttpServer;
+
+import java.io.IOException;
+
+/**
+ * HttpServer wrapper for the OzoneManager.
+ */
+public class OzoneManagerHttpServer extends BaseHttpServer {
+
+  public OzoneManagerHttpServer(Configuration conf, OzoneManager om)
+      throws IOException {
+    super(conf, "ozoneManager");
+    addServlet("serviceList", "/serviceList", ServiceListJSONServlet.class);
+    getWebAppContext().setAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE, om);
+  }
+
+  @Override protected String getHttpAddressKey() {
+    return OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY;
+  }
+
+  @Override protected String getHttpBindHostKey() {
+    return OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY;
+  }
+
+  @Override protected String getHttpsAddressKey() {
+    return OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY;
+  }
+
+  @Override protected String getHttpsBindHostKey() {
+    return OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY;
+  }
+
+  @Override protected String getBindHostDefault() {
+    return OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_DEFAULT;
+  }
+
+  @Override protected int getHttpBindPortDefault() {
+    return OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT;
+  }
+
+  @Override protected int getHttpsBindPortDefault() {
+    return OMConfigKeys.OZONE_OM_HTTPS_BIND_PORT_DEFAULT;
+  }
+
+  @Override protected String getKeytabFile() {
+    return OMConfigKeys.OZONE_OM_KEYTAB_FILE;
+  }
+
+  @Override protected String getSpnegoPrincipal() {
+    return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
+  }
+
+  @Override protected String getEnabledKey() {
+    return OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
new file mode 100644
index 0000000..47713e2
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.io.PrintWriter;
+
+
+/**
+ * Provides REST access to Ozone Service List.
+ * <p>
+ * This servlet is generally placed under the /serviceList URL of the
+ * OzoneManager HttpServer.
+ *
+ * The response format is JSON, of the form
+ * <p>
+ *  <code><pre>
+ *  {
+ *    "services" : [
+ *      {
+ *        "NodeType":"OM",
+ *        "Hostname" "$hostname",
+ *        "ports" : {
+ *          "$PortType" : "$port",
+ *          ...
+ *        }
+ *      }
+ *    ]
+ *  }
+ *  </pre></code>
+ *  <p>
+ *
+ */
+public class ServiceListJSONServlet  extends HttpServlet  {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ServiceListJSONServlet.class);
+  private static final long serialVersionUID = 1L;
+
+  private transient OzoneManager om;
+
+  public void init() throws ServletException {
+    this.om = (OzoneManager) getServletContext()
+        .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
+  }
+
+  /**
+   * Process a GET request for the specified resource.
+   *
+   * @param request
+   *          The servlet request we are processing
+   * @param response
+   *          The servlet response we are creating
+   */
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response) {
+    try {
+      ObjectMapper objectMapper = new ObjectMapper();
+      objectMapper.enable(SerializationFeature.INDENT_OUTPUT);
+      response.setContentType("application/json; charset=utf8");
+      PrintWriter writer = response.getWriter();
+      try {
+        writer.write(objectMapper.writeValueAsString(om.getServiceList()));
+      } finally {
+        if (writer != null) {
+          writer.close();
+        }
+      }
+    } catch (IOException e) {
+      LOG.error(
+          "Caught an exception while processing ServiceList request", e);
+      response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+    }
+  }
+
+}
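
As a rough illustration of consuming this endpoint, a hedged sketch follows; the host, port, and the exact JSON field layout are assumptions based on the class comment above, not something fixed by this patch:

    // Sketch only: the real address comes from the OM HTTP server configuration,
    // and the exact JSON field names are defined by ServiceInfo's serialization.
    import java.io.InputStream;
    import java.net.URL;
    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public final class ServiceListClientSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://om-host:9874/serviceList");  // hypothetical address
        ObjectMapper mapper = new ObjectMapper();
        try (InputStream in = url.openStream()) {
          JsonNode root = mapper.readTree(in);
          for (JsonNode service : root) {
            // Each entry describes one node (OM, SCM or DATANODE) and its ports.
            System.out.println(service.toString());
          }
        }
      }
    }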

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java
new file mode 100644
index 0000000..8475dd9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * OM volume manager interface.
+ */
+public interface VolumeManager {
+
+  /**
+   * Create a new volume.
+   * @param args - Volume args to create a volume
+   */
+  void createVolume(OmVolumeArgs args) throws IOException;
+
+  /**
+   * Changes the owner of a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param owner - Name of the owner.
+   * @throws IOException
+   */
+  void setOwner(String volume, String owner) throws IOException;
+
+  /**
+   * Changes the Quota on a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param quota - Quota in bytes.
+   * @throws IOException
+   */
+  void setQuota(String volume, long quota) throws IOException;
+
+  /**
+   * Gets the volume information.
+   * @param volume - Volume name.
+   * @return OmVolumeArgs, or an exception is thrown.
+   * @throws IOException
+   */
+  OmVolumeArgs getVolumeInfo(String volume) throws IOException;
+
+  /**
+   * Deletes an existing empty volume.
+   *
+   * @param volume - Name of the volume.
+   * @throws IOException
+   */
+  void deleteVolume(String volume) throws IOException;
+
+  /**
+   * Checks if the specified user with a role can access this volume.
+   *
+   * @param volume - volume
+   * @param userAcl - user acl which needs to be checked for access
+   * @return true if the user has access for the volume, false otherwise
+   * @throws IOException
+   */
+  boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
+      throws IOException;
+
+  /**
+   * Returns a list of volumes owned by a given user; if user is null,
+   * returns all volumes.
+   *
+   * @param userName
+   *   volume owner
+   * @param prefix
+   *   the volume prefix used to filter the listing result.
+   * @param startKey
+   *   the start volume name determines where to start listing from,
+   *   this key is excluded from the result.
+   * @param maxKeys
+   *   the maximum number of volumes to return.
+   * @return a list of {@link OmVolumeArgs}
+   * @throws IOException
+   */
+  List<OmVolumeArgs> listVolumes(String userName, String prefix,
+                                 String startKey, int maxKeys) throws IOException;
+}
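
As shown in the sketch below, the paging contract documented above (prefix filter, startKey excluded, at most maxKeys entries) can be consumed by resuming each page from the last returned volume name; the null prefix, the initial null startKey, and the page size are illustrative assumptions:

    // Sketch only: 'volumeManager' is assumed to be an existing implementation.
    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.ozone.om.VolumeManager;
    import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;

    public final class VolumeListingSketch {
      static void printAllVolumes(VolumeManager volumeManager, String user)
          throws IOException {
        final int pageSize = 100;
        String startKey = null;           // first page starts from the beginning
        while (true) {
          List<OmVolumeArgs> page =
              volumeManager.listVolumes(user, null, startKey, pageSize);
          for (OmVolumeArgs volume : page) {
            System.out.println(volume.getVolume());
          }
          if (page.size() < pageSize) {
            break;                        // a short page means the end was reached
          }
          // startKey is excluded from the next result, so resume from the last name.
          startKey = page.get(page.size() - 1).getVolume();
        }
      }
    }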


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[34/50] [abbrv] hadoop git commit: HDFS-13719. Docs around dfs.image.transfer.timeout are misleading. Contributed by Kitti Nansi.

Posted by bh...@apache.org.
HDFS-13719. Docs around dfs.image.transfer.timeout are misleading. Contributed by Kitti Nansi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eecb5baa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eecb5baa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eecb5baa

Branch: refs/heads/HDDS-48
Commit: eecb5baaaaa54599aeae758abd4007e55e5b531f
Parents: 43f7fe8
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Jul 9 15:17:21 2018 +0200
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Jul 9 15:17:21 2018 +0200

----------------------------------------------------------------------
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml    | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eecb5baa/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 6dd2d92..384cedf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1289,11 +1289,10 @@
   <name>dfs.image.transfer.timeout</name>
   <value>60000</value>
   <description>
-        Socket timeout for image transfer in milliseconds. This timeout and the related
-        dfs.image.transfer.bandwidthPerSec parameter should be configured such
-        that normal image transfer can complete successfully.
-        This timeout prevents client hangs when the sender fails during
-        image transfer. This is socket timeout during image transfer.
+        Socket timeout for the HttpURLConnection instance used in the image
+        transfer. This is measured in milliseconds.
+        This timeout prevents client hangs if the connection is idle
+        for this configured timeout, during image transfer.
   </description>
 </property>
 
@@ -1304,9 +1303,7 @@
         Maximum bandwidth used for regular image transfers (instead of
         bootstrapping the standby namenode), in bytes per second.
         This can help keep normal namenode operations responsive during
-        checkpointing. The maximum bandwidth and timeout in
-        dfs.image.transfer.timeout should be set such that normal image
-        transfers can complete successfully.
+        checkpointing.
         A default value of 0 indicates that throttling is disabled.
         The maximum bandwidth used for bootstrapping standby namenode is
         configured with dfs.image.transfer-bootstrap-standby.bandwidthPerSec.
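
A hedged sketch of reading these two settings with the standard Configuration API; the defaults below simply mirror the values documented above:

    // Sketch only: illustrates reading the keys documented above.
    import org.apache.hadoop.conf.Configuration;

    public final class ImageTransferConfSketch {
      static void demo() {
        Configuration conf = new Configuration();
        int socketTimeoutMs =
            conf.getInt("dfs.image.transfer.timeout", 60000);
        long bandwidthPerSec =
            conf.getLong("dfs.image.transfer.bandwidthPerSec", 0);  // 0 = throttling disabled
        System.out.println(socketTimeoutMs + " " + bandwidthPerSec);
      }
    }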


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[23/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
new file mode 100644
index 0000000..05c8d45
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -0,0 +1,277 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
+import org.apache.hadoop.util.Time;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Args for a key's blocks. The block instances for the key requested in putKey.
+ * This is returned from OM to the client, and the client uses this class to talk
+ * to the datanode. It is also the metadata written to om.db on the server side.
+ */
+public final class OmKeyInfo {
+  private final String volumeName;
+  private final String bucketName;
+  // name of key client specified
+  private String keyName;
+  private long dataSize;
+  private List<OmKeyLocationInfoGroup> keyLocationVersions;
+  private final long creationTime;
+  private long modificationTime;
+  private HddsProtos.ReplicationType type;
+  private HddsProtos.ReplicationFactor factor;
+
+  private OmKeyInfo(String volumeName, String bucketName, String keyName,
+                    List<OmKeyLocationInfoGroup> versions, long dataSize,
+                    long creationTime, long modificationTime, HddsProtos.ReplicationType type,
+                    HddsProtos.ReplicationFactor factor) {
+    this.volumeName = volumeName;
+    this.bucketName = bucketName;
+    this.keyName = keyName;
+    this.dataSize = dataSize;
+    // it is important that the versions are ordered from old to new.
+    // Perform this sanity check when the versions are loaded on creating OmKeyInfo.
+    // TODO : this is not strictly necessary; it is here only because versioning is
+    // still a work in progress. Remove the following check once versioning is
+    // complete and proven to function correctly.
+    long currentVersion = -1;
+    for (OmKeyLocationInfoGroup version : versions) {
+      Preconditions.checkArgument(
+            currentVersion + 1 == version.getVersion());
+      currentVersion = version.getVersion();
+    }
+    this.keyLocationVersions = versions;
+    this.creationTime = creationTime;
+    this.modificationTime = modificationTime;
+    this.factor = factor;
+    this.type = type;
+  }
+
+  public String getVolumeName() {
+    return volumeName;
+  }
+
+  public String getBucketName() {
+    return bucketName;
+  }
+
+  public HddsProtos.ReplicationType getType() {
+    return type;
+  }
+
+  public HddsProtos.ReplicationFactor getFactor() {
+    return factor;
+  }
+
+  public String getKeyName() {
+    return keyName;
+  }
+
+  public void setKeyName(String keyName) {
+    this.keyName = keyName;
+  }
+
+  public long getDataSize() {
+    return dataSize;
+  }
+
+  public void setDataSize(long size) {
+    this.dataSize = size;
+  }
+
+  public synchronized OmKeyLocationInfoGroup getLatestVersionLocations()
+      throws IOException {
+    return keyLocationVersions.size() == 0? null :
+        keyLocationVersions.get(keyLocationVersions.size() - 1);
+  }
+
+  public List<OmKeyLocationInfoGroup> getKeyLocationVersions() {
+    return keyLocationVersions;
+  }
+
+  public void updateModifcationTime() {
+    this.modificationTime = Time.monotonicNow();
+  }
+
+  /**
+   * Append a set of blocks to the latest version. Note that these blocks are
+   * part of the latest version, not a new version.
+   *
+   * @param newLocationList the list of new blocks to be added.
+   * @throws IOException
+   */
+  public synchronized void appendNewBlocks(
+      List<OmKeyLocationInfo> newLocationList) throws IOException {
+    if (keyLocationVersions.size() == 0) {
+      throw new IOException("Appending new block, but no version exist");
+    }
+    OmKeyLocationInfoGroup currentLatestVersion =
+        keyLocationVersions.get(keyLocationVersions.size() - 1);
+    currentLatestVersion.appendNewBlocks(newLocationList);
+    setModificationTime(Time.now());
+  }
+
+  /**
+   * Add a new set of blocks. The new blocks will be added as appending a new
+   * version to the all version list.
+   *
+   * @param newLocationList the list of new blocks to be added.
+   * @throws IOException
+   */
+  public synchronized long addNewVersion(
+      List<OmKeyLocationInfo> newLocationList) throws IOException {
+    long latestVersionNum;
+    if (keyLocationVersions.size() == 0) {
+      // no version exist, these blocks are the very first version.
+      keyLocationVersions.add(new OmKeyLocationInfoGroup(0, newLocationList));
+      latestVersionNum = 0;
+    } else {
+      // it is important that the new version is always at the tail of the list
+      OmKeyLocationInfoGroup currentLatestVersion =
+          keyLocationVersions.get(keyLocationVersions.size() - 1);
+      // the new version is created based on the current latest version
+      OmKeyLocationInfoGroup newVersion =
+          currentLatestVersion.generateNextVersion(newLocationList);
+      keyLocationVersions.add(newVersion);
+      latestVersionNum = newVersion.getVersion();
+    }
+    setModificationTime(Time.now());
+    return latestVersionNum;
+  }
+
+  public long getCreationTime() {
+    return creationTime;
+  }
+
+  public long getModificationTime() {
+    return modificationTime;
+  }
+
+  public void setModificationTime(long modificationTime) {
+    this.modificationTime = modificationTime;
+  }
+
+  /**
+   * Builder of OmKeyInfo.
+   */
+  public static class Builder {
+    private String volumeName;
+    private String bucketName;
+    private String keyName;
+    private long dataSize;
+    private List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups;
+    private long creationTime;
+    private long modificationTime;
+    private HddsProtos.ReplicationType type;
+    private HddsProtos.ReplicationFactor factor;
+
+    public Builder setVolumeName(String volume) {
+      this.volumeName = volume;
+      return this;
+    }
+
+    public Builder setBucketName(String bucket) {
+      this.bucketName = bucket;
+      return this;
+    }
+
+    public Builder setKeyName(String key) {
+      this.keyName = key;
+      return this;
+    }
+
+    public Builder setOmKeyLocationInfos(
+        List<OmKeyLocationInfoGroup> omKeyLocationInfoList) {
+      this.omKeyLocationInfoGroups = omKeyLocationInfoList;
+      return this;
+    }
+
+    public Builder setDataSize(long size) {
+      this.dataSize = size;
+      return this;
+    }
+
+    public Builder setCreationTime(long crTime) {
+      this.creationTime = crTime;
+      return this;
+    }
+
+    public Builder setModificationTime(long mTime) {
+      this.modificationTime = mTime;
+      return this;
+    }
+
+    public Builder setReplicationFactor(HddsProtos.ReplicationFactor factor) {
+      this.factor = factor;
+      return this;
+    }
+
+    public Builder setReplicationType(HddsProtos.ReplicationType type) {
+      this.type = type;
+      return this;
+    }
+
+    public OmKeyInfo build() {
+      return new OmKeyInfo(
+          volumeName, bucketName, keyName, omKeyLocationInfoGroups,
+          dataSize, creationTime, modificationTime, type, factor);
+    }
+  }
+
+  public KeyInfo getProtobuf() {
+    long latestVersion = keyLocationVersions.size() == 0 ? -1 :
+        keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion();
+    return KeyInfo.newBuilder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setDataSize(dataSize)
+        .setFactor(factor)
+        .setType(type)
+        .addAllKeyLocationList(keyLocationVersions.stream()
+            .map(OmKeyLocationInfoGroup::getProtobuf)
+            .collect(Collectors.toList()))
+        .setLatestVersion(latestVersion)
+        .setCreationTime(creationTime)
+        .setModificationTime(modificationTime)
+        .build();
+  }
+
+  public static OmKeyInfo getFromProtobuf(KeyInfo keyInfo) {
+    return new OmKeyInfo(
+        keyInfo.getVolumeName(),
+        keyInfo.getBucketName(),
+        keyInfo.getKeyName(),
+        keyInfo.getKeyLocationListList().stream()
+            .map(OmKeyLocationInfoGroup::getFromProtobuf)
+            .collect(Collectors.toList()),
+        keyInfo.getDataSize(),
+        keyInfo.getCreationTime(),
+        keyInfo.getModificationTime(),
+        keyInfo.getType(),
+        keyInfo.getFactor());
+  }
+
+}
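
A hedged sketch of the versioning flow described above: the key starts with an empty version 0, and addNewVersion creates version 1 that also carries the new block. All names, ids and sizes here are made up for illustration:

    // Sketch only: identifiers and sizes are illustrative, not from this patch.
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Collections;
    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
    import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
    import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;

    public final class OmKeyInfoSketch {
      static OmKeyInfo example() throws IOException {
        OmKeyInfo keyInfo = new OmKeyInfo.Builder()
            .setVolumeName("vol1")
            .setBucketName("bucket1")
            .setKeyName("key1")
            .setDataSize(0)
            // versions must be ordered from old to new, starting at version 0
            .setOmKeyLocationInfos(new ArrayList<>(Collections.singletonList(
                new OmKeyLocationInfoGroup(0, new ArrayList<>()))))
            .setReplicationType(HddsProtos.ReplicationType.RATIS)
            .setReplicationFactor(HddsProtos.ReplicationFactor.THREE)
            .build();

        OmKeyLocationInfo block = new OmKeyLocationInfo.Builder()
            .setBlockID(new BlockID(1L, 1L))   // containerID, localID (illustrative)
            .setShouldCreateContainer(false)
            .setLength(4096)
            .setOffset(0)
            .build();

        // Creates version 1 based on version 0 plus the new block.
        long newVersion = keyInfo.addNewVersion(Collections.singletonList(block));
        System.out.println("latest version: " + newVersion);  // prints 1
        return keyInfo;
      }
    }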

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
new file mode 100644
index 0000000..3f6666d
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation;
+
+/**
+ * One key can be too huge to fit in one container, in which case it gets split
+ * into a number of subkeys. This class represents one such subkey instance.
+ */
+public final class OmKeyLocationInfo {
+  private final BlockID blockID;
+  private final boolean shouldCreateContainer;
+  // the id of this subkey in all the subkeys.
+  private final long length;
+  private final long offset;
+  // the version number indicating when this block was added
+  private long createVersion;
+
+  private OmKeyLocationInfo(BlockID blockID, boolean shouldCreateContainer,
+                            long length, long offset) {
+    this.blockID = blockID;
+    this.shouldCreateContainer = shouldCreateContainer;
+    this.length = length;
+    this.offset = offset;
+  }
+
+  public void setCreateVersion(long version) {
+    createVersion = version;
+  }
+
+  public long getCreateVersion() {
+    return createVersion;
+  }
+
+  public BlockID getBlockID() {
+    return blockID;
+  }
+
+  public long getContainerID() {
+    return blockID.getContainerID();
+  }
+
+  public long getLocalID() {
+    return blockID.getLocalID();
+  }
+
+  public boolean getShouldCreateContainer() {
+    return shouldCreateContainer;
+  }
+
+  public long getLength() {
+    return length;
+  }
+
+  public long getOffset() {
+    return offset;
+  }
+
+  /**
+   * Builder of OmKeyLocationInfo.
+   */
+  public static class Builder {
+    private BlockID blockID;
+    private boolean shouldCreateContainer;
+    private long length;
+    private long offset;
+
+    public Builder setBlockID(BlockID blockId) {
+      this.blockID = blockId;
+      return this;
+    }
+
+    public Builder setShouldCreateContainer(boolean create) {
+      this.shouldCreateContainer = create;
+      return this;
+    }
+
+    public Builder setLength(long len) {
+      this.length = len;
+      return this;
+    }
+
+    public Builder setOffset(long off) {
+      this.offset = off;
+      return this;
+    }
+
+    public OmKeyLocationInfo build() {
+      return new OmKeyLocationInfo(blockID,
+          shouldCreateContainer, length, offset);
+    }
+  }
+
+  public KeyLocation getProtobuf() {
+    return KeyLocation.newBuilder()
+        .setBlockID(blockID.getProtobuf())
+        .setShouldCreateContainer(shouldCreateContainer)
+        .setLength(length)
+        .setOffset(offset)
+        .setCreateVersion(createVersion)
+        .build();
+  }
+
+  public static OmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) {
+    OmKeyLocationInfo info = new OmKeyLocationInfo(
+        BlockID.getFromProtobuf(keyLocation.getBlockID()),
+        keyLocation.getShouldCreateContainer(),
+        keyLocation.getLength(),
+        keyLocation.getOffset());
+    info.setCreateVersion(keyLocation.getCreateVersion());
+    return info;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
new file mode 100644
index 0000000..8bdcee3
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * A list of key locations. This class represents one single version of the
+ * blocks of a key.
+ */
+public class OmKeyLocationInfoGroup {
+  private final long version;
+  private final List<OmKeyLocationInfo> locationList;
+
+  public OmKeyLocationInfoGroup(long version,
+                                List<OmKeyLocationInfo> locations) {
+    this.version = version;
+    this.locationList = locations;
+  }
+
+  /**
+   * Return only the blocks that are created in the most recent version.
+   *
+   * @return the list of blocks that are created in the latest version.
+   */
+  public List<OmKeyLocationInfo> getBlocksLatestVersionOnly() {
+    List<OmKeyLocationInfo> list = new ArrayList<>();
+    locationList.stream().filter(x -> x.getCreateVersion() == version)
+        .forEach(list::add);
+    return list;
+  }
+
+  public long getVersion() {
+    return version;
+  }
+
+  public List<OmKeyLocationInfo> getLocationList() {
+    return locationList;
+  }
+
+  public KeyLocationList getProtobuf() {
+    return KeyLocationList.newBuilder()
+        .setVersion(version)
+        .addAllKeyLocations(
+            locationList.stream().map(OmKeyLocationInfo::getProtobuf)
+                .collect(Collectors.toList()))
+        .build();
+  }
+
+  public static OmKeyLocationInfoGroup getFromProtobuf(
+      KeyLocationList keyLocationList) {
+    return new OmKeyLocationInfoGroup(
+        keyLocationList.getVersion(),
+        keyLocationList.getKeyLocationsList().stream()
+            .map(OmKeyLocationInfo::getFromProtobuf)
+            .collect(Collectors.toList()));
+  }
+
+  /**
+   * Given a new block location, generate a new version list based upon this
+   * one.
+   *
+   * @param newLocationList a list of new locations to be added.
+   * @return a new version group containing the existing blocks plus the new ones.
+   */
+  OmKeyLocationInfoGroup generateNextVersion(
+      List<OmKeyLocationInfo> newLocationList) throws IOException {
+    // TODO : revisit if we can do this method more efficiently
+    // one potential inefficiency here is that later versions always include
+    // older ones. e.g. v1 has B1, then v2, v3...will all have B1 and only add
+    // more
+    List<OmKeyLocationInfo> newList = new ArrayList<>();
+    newList.addAll(locationList);
+    for (OmKeyLocationInfo newInfo : newLocationList) {
+      // all these new blocks will have addVersion of current version + 1
+      newInfo.setCreateVersion(version + 1);
+      newList.add(newInfo);
+    }
+    return new OmKeyLocationInfoGroup(version + 1, newList);
+  }
+
+  void appendNewBlocks(List<OmKeyLocationInfo> newLocationList)
+      throws IOException {
+    for (OmKeyLocationInfo info : newLocationList) {
+      info.setCreateVersion(version);
+      locationList.add(info);
+    }
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("version:").append(version).append(" ");
+    for (OmKeyLocationInfo kli : locationList) {
+      sb.append(kli.getLocalID()).append(" || ");
+    }
+    return sb.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
new file mode 100644
index 0000000..de75a05
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType;
+
+import java.util.List;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.ArrayList;
+import java.util.HashMap;
+
+/**
+ * This helper class keeps a map of all users and their permissions.
+ */
+public class OmOzoneAclMap {
+  // per Acl Type user:rights map
+  private ArrayList<Map<String, OzoneAclRights>> aclMaps;
+
+  OmOzoneAclMap() {
+    aclMaps = new ArrayList<>();
+    for (OzoneAclType aclType : OzoneAclType.values()) {
+      aclMaps.add(aclType.ordinal(), new HashMap<>());
+    }
+  }
+
+  private Map<String, OzoneAclRights> getMap(OzoneAclType type) {
+    return aclMaps.get(type.ordinal());
+  }
+
+  // For a given acl type and user, get the stored acl
+  private OzoneAclRights getAcl(OzoneAclType type, String user) {
+    return getMap(type).get(user);
+  }
+
+  // Add a new acl to the map
+  public void addAcl(OzoneAclInfo acl) {
+    getMap(acl.getType()).put(acl.getName(), acl.getRights());
+  }
+
+  // for a given acl, check if the user has access rights
+  public boolean hasAccess(OzoneAclInfo acl) {
+    OzoneAclRights storedRights = getAcl(acl.getType(), acl.getName());
+    if (storedRights != null) {
+      switch (acl.getRights()) {
+      case READ:
+        return (storedRights == OzoneAclRights.READ)
+            || (storedRights == OzoneAclRights.READ_WRITE);
+      case WRITE:
+        return (storedRights == OzoneAclRights.WRITE)
+            || (storedRights == OzoneAclRights.READ_WRITE);
+      case READ_WRITE:
+        return (storedRights == OzoneAclRights.READ_WRITE);
+      default:
+        return false;
+      }
+    } else {
+      return false;
+    }
+  }
+
+  // Convert this map to OzoneAclInfo Protobuf List
+  public List<OzoneAclInfo> ozoneAclGetProtobuf() {
+    List<OzoneAclInfo> aclList = new LinkedList<>();
+    for (OzoneAclType type: OzoneAclType.values()) {
+      for (Map.Entry<String, OzoneAclRights> entry :
+          aclMaps.get(type.ordinal()).entrySet()) {
+        OzoneAclInfo aclInfo = OzoneAclInfo.newBuilder()
+            .setName(entry.getKey())
+            .setType(type)
+            .setRights(entry.getValue())
+            .build();
+        aclList.add(aclInfo);
+      }
+    }
+
+    return aclList;
+  }
+
+  // Create map from list of OzoneAclInfos
+  public static OmOzoneAclMap ozoneAclGetFromProtobuf(
+      List<OzoneAclInfo> aclList) {
+    OmOzoneAclMap aclMap = new OmOzoneAclMap();
+    for (OzoneAclInfo acl : aclList) {
+      aclMap.addAcl(acl);
+    }
+    return aclMap;
+  }
+}
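
As the sketch below illustrates, a stored READ_WRITE right satisfies both READ and WRITE checks under the hasAccess logic above; the acl type constant and user name are assumptions for illustration, and the sketch sits in the helpers package because the constructor is package-private:

    // Sketch only: placed in the same package since OmOzoneAclMap() is package-private.
    package org.apache.hadoop.ozone.om.helpers;

    import org.apache.hadoop.ozone.protocol.proto
        .OzoneManagerProtocolProtos.OzoneAclInfo;
    import org.apache.hadoop.ozone.protocol.proto
        .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights;
    import org.apache.hadoop.ozone.protocol.proto
        .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType;

    public final class AclMapSketch {
      static void demo() {
        OmOzoneAclMap acls = new OmOzoneAclMap();
        acls.addAcl(OzoneAclInfo.newBuilder()
            .setType(OzoneAclType.USER)            // assumed acl type constant
            .setName("alice")                      // illustrative user
            .setRights(OzoneAclRights.READ_WRITE)
            .build());

        // Both checks pass because READ_WRITE implies READ and WRITE above.
        boolean canRead = acls.hasAccess(OzoneAclInfo.newBuilder()
            .setType(OzoneAclType.USER).setName("alice")
            .setRights(OzoneAclRights.READ).build());
        boolean canWrite = acls.hasAccess(OzoneAclInfo.newBuilder()
            .setType(OzoneAclType.USER).setName("alice")
            .setRights(OzoneAclRights.WRITE).build());
        System.out.println(canRead + " " + canWrite);  // true true
      }
    }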

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
new file mode 100644
index 0000000..c8b59b6
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
@@ -0,0 +1,223 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+
+/**
+ * A class that encapsulates the OmVolumeArgs Args.
+ */
+public final class OmVolumeArgs {
+  private final String adminName;
+  private final String ownerName;
+  private final String volume;
+  private final long creationTime;
+  private final long quotaInBytes;
+  private final Map<String, String> keyValueMap;
+  private final OmOzoneAclMap aclMap;
+
+  /**
+   * Private constructor, constructed via builder.
+   * @param adminName  - Administrator's name.
+   * @param ownerName  - Volume owner's name
+   * @param volume - volume name
+   * @param quotaInBytes - Volume Quota in bytes.
+   * @param keyValueMap - keyValue map.
+   * @param aclMap - User to access rights map.
+   * @param creationTime - Volume creation time.
+   */
+  private OmVolumeArgs(String adminName, String ownerName, String volume,
+                       long quotaInBytes, Map<String, String> keyValueMap,
+                       OmOzoneAclMap aclMap, long creationTime) {
+    this.adminName = adminName;
+    this.ownerName = ownerName;
+    this.volume = volume;
+    this.quotaInBytes = quotaInBytes;
+    this.keyValueMap = keyValueMap;
+    this.aclMap = aclMap;
+    this.creationTime = creationTime;
+  }
+
+  /**
+   * Returns the Admin Name.
+   * @return String.
+   */
+  public String getAdminName() {
+    return adminName;
+  }
+
+  /**
+   * Returns the owner Name.
+   * @return String
+   */
+  public String getOwnerName() {
+    return ownerName;
+  }
+
+  /**
+   * Returns the volume Name.
+   * @return String
+   */
+  public String getVolume() {
+    return volume;
+  }
+
+  /**
+   * Returns creation time.
+   * @return long
+   */
+  public long getCreationTime() {
+    return creationTime;
+  }
+
+  /**
+   * Returns Quota in Bytes.
+   * @return long, Quota in bytes.
+   */
+  public long getQuotaInBytes() {
+    return quotaInBytes;
+  }
+
+  public Map<String, String> getKeyValueMap() {
+    return keyValueMap;
+  }
+
+  public OmOzoneAclMap getAclMap() {
+    return aclMap;
+  }
+  /**
+   * Returns new builder class that builds a OmVolumeArgs.
+   *
+   * @return Builder
+   */
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  /**
+   * Builder for OmVolumeArgs.
+   */
+  public static class Builder {
+    private String adminName;
+    private String ownerName;
+    private String volume;
+    private long creationTime;
+    private long quotaInBytes;
+    private Map<String, String> keyValueMap;
+    private OmOzoneAclMap aclMap;
+
+    /**
+     * Constructs a builder.
+     */
+    Builder() {
+      keyValueMap = new HashMap<>();
+      aclMap = new OmOzoneAclMap();
+    }
+
+    public Builder setAdminName(String admin) {
+      this.adminName = admin;
+      return this;
+    }
+
+    public Builder setOwnerName(String owner) {
+      this.ownerName = owner;
+      return this;
+    }
+
+    public Builder setVolume(String volumeName) {
+      this.volume = volumeName;
+      return this;
+    }
+
+    public Builder setCreationTime(long createdOn) {
+      this.creationTime = createdOn;
+      return this;
+    }
+
+    public Builder setQuotaInBytes(long quota) {
+      this.quotaInBytes = quota;
+      return this;
+    }
+
+    public Builder addMetadata(String key, String value) {
+      keyValueMap.put(key, value); // overwrite if present.
+      return this;
+    }
+
+    public Builder addOzoneAcls(OzoneAclInfo acl) throws IOException {
+      aclMap.addAcl(acl);
+      return this;
+    }
+
+    /**
+     * Constructs an OmVolumeArgs instance.
+     * @return OmVolumeArgs.
+     */
+    public OmVolumeArgs build() {
+      Preconditions.checkNotNull(adminName);
+      Preconditions.checkNotNull(ownerName);
+      Preconditions.checkNotNull(volume);
+      return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes,
+          keyValueMap, aclMap, creationTime);
+    }
+  }
+
+  public VolumeInfo getProtobuf() {
+    List<KeyValue> metadataList = new LinkedList<>();
+    for (Map.Entry<String, String> entry : keyValueMap.entrySet()) {
+      metadataList.add(KeyValue.newBuilder().setKey(entry.getKey()).
+          setValue(entry.getValue()).build());
+    }
+    List<OzoneAclInfo> aclList = aclMap.ozoneAclGetProtobuf();
+
+    return VolumeInfo.newBuilder()
+        .setAdminName(adminName)
+        .setOwnerName(ownerName)
+        .setVolume(volume)
+        .setQuotaInBytes(quotaInBytes)
+        .addAllMetadata(metadataList)
+        .addAllVolumeAcls(aclList)
+        .setCreationTime(creationTime)
+        .build();
+  }
+
+  public static OmVolumeArgs getFromProtobuf(VolumeInfo volInfo) {
+    Map<String, String> kvMap = volInfo.getMetadataList().stream()
+        .collect(Collectors.toMap(KeyValue::getKey,
+            KeyValue::getValue));
+    OmOzoneAclMap aclMap =
+        OmOzoneAclMap.ozoneAclGetFromProtobuf(volInfo.getVolumeAclsList());
+
+    return new OmVolumeArgs(volInfo.getAdminName(), volInfo.getOwnerName(),
+        volInfo.getVolume(), volInfo.getQuotaInBytes(), kvMap, aclMap,
+        volInfo.getCreationTime());
+  }
+}
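
A brief usage sketch of the builder and the protobuf round trip above; the principals, quota and metadata are illustrative only:

    // Sketch only: values are illustrative.
    import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
    import org.apache.hadoop.ozone.protocol.proto
        .OzoneManagerProtocolProtos.VolumeInfo;

    public final class OmVolumeArgsSketch {
      static void demo() {
        OmVolumeArgs args = OmVolumeArgs.newBuilder()
            .setAdminName("hdfs")                  // illustrative principals
            .setOwnerName("alice")
            .setVolume("vol1")
            .setQuotaInBytes(1024L * 1024 * 1024)  // 1 GB quota
            .addMetadata("purpose", "demo")
            .setCreationTime(System.currentTimeMillis())
            .build();

        VolumeInfo proto = args.getProtobuf();                    // serialize
        OmVolumeArgs copy = OmVolumeArgs.getFromProtobuf(proto);  // and back
        System.out.println(copy.getVolume() + " quota=" + copy.getQuotaInBytes());
      }
    }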

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java
new file mode 100644
index 0000000..bc364e6
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+/**
+ * This class represents an open key "session". A session here means a key is
+ * opened by a specific client. The client sends this handle back to the
+ * server so that the server can recognize the client and knows how to close
+ * the key.
+ */
+public class OpenKeySession {
+  private final int id;
+  private final OmKeyInfo keyInfo;
+  // The version of the key when it is opened in this session.
+  // A block whose create version equals the open version will be committed
+  // only when this open session is closed.
+  private long openVersion;
+
+  public OpenKeySession(int id, OmKeyInfo info, long version) {
+    this.id = id;
+    this.keyInfo = info;
+    this.openVersion = version;
+  }
+
+  public long getOpenVersion() {
+    return this.openVersion;
+  }
+
+  public OmKeyInfo getKeyInfo() {
+    return keyInfo;
+  }
+
+  public int getId() {
+    return id;
+  }
+}
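
A short sketch of how a writer might consume the session handle above; the session itself would normally come from the OM's openKey() call defined later in this patch.

    import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
    import org.apache.hadoop.ozone.om.helpers.OpenKeySession;

    public class OpenKeySessionSketch {
      // Illustrative only: the session is returned by OzoneManagerProtocol#openKey.
      static void inspect(OpenKeySession session) {
        int clientId = session.getId();              // handle the OM uses to track this client
        long openVersion = session.getOpenVersion(); // blocks created at this version are
                                                     // committed when the session is closed
        OmKeyInfo keyInfo = session.getKeyInfo();    // key metadata for the writer
        System.out.println(keyInfo + " opened at version " + openVersion
            + " for client " + clientId);
      }
    }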

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
new file mode 100644
index 0000000..9b03aef
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
@@ -0,0 +1,237 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.helpers;
+
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectReader;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .ServicePort;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+/**
+ * ServiceInfo holds the config details of Ozone services.
+ */
+public final class ServiceInfo {
+
+  private static final ObjectReader READER =
+      new ObjectMapper().readerFor(ServiceInfo.class);
+  private static final ObjectWriter WRITER =
+      new ObjectMapper().writerWithDefaultPrettyPrinter();
+
+  /**
+   * Type of node/service.
+   */
+  private NodeType nodeType;
+  /**
+   * Hostname of the node in which the service is running.
+   */
+  private String hostname;
+
+  /**
+   * List of ports the service listens to.
+   */
+  private Map<ServicePort.Type, Integer> ports;
+
+  /**
+   * Default constructor for JSON deserialization.
+   */
+  public ServiceInfo() {}
+
+  /**
+   * Constructs the ServiceInfo for the {@code nodeType}.
+   * @param nodeType type of node/service
+   * @param hostname hostname of the service
+   * @param portList list of ports the service listens to
+   */
+  private ServiceInfo(
+      NodeType nodeType, String hostname, List<ServicePort> portList) {
+    Preconditions.checkNotNull(nodeType);
+    Preconditions.checkNotNull(hostname);
+    this.nodeType = nodeType;
+    this.hostname = hostname;
+    this.ports = new HashMap<>();
+    for (ServicePort port : portList) {
+      ports.put(port.getType(), port.getValue());
+    }
+  }
+
+  /**
+   * Returns the type of node/service.
+   * @return node type
+   */
+  public NodeType getNodeType() {
+    return nodeType;
+  }
+
+  /**
+   * Returns the hostname of the service.
+   * @return hostname
+   */
+  public String getHostname() {
+    return hostname;
+  }
+
+  /**
+   * Returns ServicePort.Type to port mappings.
+   * @return ports
+   */
+  public Map<ServicePort.Type, Integer> getPorts() {
+    return ports;
+  }
+
+  /**
+   * Returns the port for the given type. The service is expected to expose a
+   * port of this type; check {@link #getPorts()} first if that is not
+   * guaranteed.
+   *
+   * @param type the type of port,
+   *             e.g. RPC, HTTP, HTTPS
+   * @return the port number
+   */
+  @JsonIgnore
+  public int getPort(ServicePort.Type type) {
+    return ports.get(type);
+  }
+
+  /**
+   * Converts {@link ServiceInfo} to OzoneManagerProtocolProtos.ServiceInfo.
+   *
+   * @return OzoneManagerProtocolProtos.ServiceInfo
+   */
+  @JsonIgnore
+  public OzoneManagerProtocolProtos.ServiceInfo getProtobuf() {
+    OzoneManagerProtocolProtos.ServiceInfo.Builder builder =
+        OzoneManagerProtocolProtos.ServiceInfo.newBuilder();
+    builder.setNodeType(nodeType)
+        .setHostname(hostname)
+        .addAllServicePorts(
+            ports.entrySet().stream()
+                .map(
+                    entry ->
+                        ServicePort.newBuilder()
+                            .setType(entry.getKey())
+                            .setValue(entry.getValue()).build())
+                .collect(Collectors.toList()));
+    return builder.build();
+  }
+
+  /**
+   * Converts OzoneManagerProtocolProtos.ServiceInfo to {@link ServiceInfo}.
+   *
+   * @return {@link ServiceInfo}
+   */
+  @JsonIgnore
+  public static ServiceInfo getFromProtobuf(
+      OzoneManagerProtocolProtos.ServiceInfo serviceInfo) {
+    return new ServiceInfo(serviceInfo.getNodeType(),
+        serviceInfo.getHostname(),
+        serviceInfo.getServicePortsList());
+  }
+
+  /**
+   * Returns a JSON string of this object.
+   *
+   * @return String - json string
+   * @throws IOException
+   */
+  public String toJsonString() throws IOException {
+    return WRITER.writeValueAsString(this);
+  }
+
+  /**
+   * Parses a JSON string into a ServiceInfo object.
+   *
+   * @param jsonString JSON string
+   * @return ServiceInfo
+   * @throws IOException
+   */
+  public static ServiceInfo parse(String jsonString) throws IOException {
+    return READER.readValue(jsonString);
+  }
+
+  /**
+   * Creates a new builder to build {@link ServiceInfo}.
+   * @return {@link ServiceInfo.Builder}
+   */
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  /**
+   * Builder used to build/construct {@link ServiceInfo}.
+   */
+  public static class Builder {
+
+    private NodeType node;
+    private String host;
+    private List<ServicePort> portList = new ArrayList<>();
+
+
+    /**
+     * Sets the node/service type.
+     * @param nodeType type of node
+     * @return the builder
+     */
+    public Builder setNodeType(NodeType nodeType) {
+      node = nodeType;
+      return this;
+    }
+
+    /**
+     * Sets the hostname of the service.
+     * @param hostname service hostname
+     * @return the builder
+     */
+    public Builder setHostname(String hostname) {
+      host = hostname;
+      return this;
+    }
+
+    /**
+     * Adds the service port to the service port list.
+     * @param servicePort service port to add
+     * @return the builder
+     */
+    public Builder addServicePort(ServicePort servicePort) {
+      portList.add(servicePort);
+      return this;
+    }
+
+
+    /**
+     * Builds and returns {@link ServiceInfo} with the set values.
+     * @return {@link ServiceInfo}
+     */
+    public ServiceInfo build() {
+      return new ServiceInfo(node, host, portList);
+    }
+  }
+
+}
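
A sketch of building a ServiceInfo and serializing it for the REST layer. NodeType.OM and the RPC port type are assumed values from the generated protobuf enums; the hostname and port number are placeholders.

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
    import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;

    public class ServiceInfoSketch {
      public static void main(String[] args) throws Exception {
        ServiceInfo info = ServiceInfo.newBuilder()
            .setNodeType(NodeType.OM)                // assumed enum value after the KSM -> OM rename
            .setHostname("om.example.com")
            .addServicePort(ServicePort.newBuilder()
                .setType(ServicePort.Type.RPC)       // RPC is one of the port types named above
                .setValue(9862)                      // illustrative port number
                .build())
            .build();

        System.out.println(info.toJsonString());     // pretty-printed JSON
        System.out.println(info.getPort(ServicePort.Type.RPC));
      }
    }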

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java
new file mode 100644
index 0000000..6fc7c8f
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java
@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import com.google.common.base.Preconditions;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A class that encapsulates the createVolume Args.
+ */
+public final class VolumeArgs {
+  private final String adminName;
+  private final String ownerName;
+  private final String volume;
+  private final long quotaInBytes;
+  private final Map<String, String> extendedAttributes;
+
+  /**
+   * Private constructor, constructed via builder.
+   *
+   * @param adminName - Administrator name.
+   * @param ownerName - Volume owner's name
+   * @param volume - volume name
+   * @param quotaInBytes - Volume Quota in bytes.
+   * @param keyValueMap - keyValue map.
+   */
+  private VolumeArgs(String adminName, String ownerName, String volume,
+      long quotaInBytes, Map<String, String> keyValueMap) {
+    this.adminName = adminName;
+    this.ownerName = ownerName;
+    this.volume = volume;
+    this.quotaInBytes = quotaInBytes;
+    this.extendedAttributes = keyValueMap;
+  }
+
+  /**
+   * Returns the Admin Name.
+   *
+   * @return String.
+   */
+  public String getAdminName() {
+    return adminName;
+  }
+
+  /**
+   * Returns the owner Name.
+   *
+   * @return String
+   */
+  public String getOwnerName() {
+    return ownerName;
+  }
+
+  /**
+   * Returns the volume Name.
+   *
+   * @return String
+   */
+  public String getVolume() {
+    return volume;
+  }
+
+  /**
+   * Returns Quota in Bytes.
+   *
+   * @return long, Quota in bytes.
+   */
+  public long getQuotaInBytes() {
+    return quotaInBytes;
+  }
+
+  public Map<String, String> getExtendedAttributes() {
+    return extendedAttributes;
+  }
+
+  static class Builder {
+    private String adminName;
+    private String ownerName;
+    private String volume;
+    private long quotaInBytes;
+    private Map<String, String> extendedAttributes;
+
+    /**
+     * Constructs a builder.
+     */
+    Builder() {
+      extendedAttributes = new HashMap<>();
+    }
+
+    public void setAdminName(String adminName) {
+      this.adminName = adminName;
+    }
+
+    public void setOwnerName(String ownerName) {
+      this.ownerName = ownerName;
+    }
+
+    public void setVolume(String volume) {
+      this.volume = volume;
+    }
+
+    public void setQuotaInBytes(long quotaInBytes) {
+      this.quotaInBytes = quotaInBytes;
+    }
+
+    public void addMetadata(String key, String value) {
+      extendedAttributes.put(key, value); // overwrite if present.
+    }
+
+    /**
+     * Builds and returns a {@link VolumeArgs} instance from the set values.
+     *
+     * @return VolumeArgs
+     */
+    public VolumeArgs build() {
+      Preconditions.checkNotNull(adminName);
+      Preconditions.checkNotNull(ownerName);
+      Preconditions.checkNotNull(volume);
+      return new VolumeArgs(adminName, ownerName, volume, quotaInBytes,
+          extendedAttributes);
+    }
+  }
+}
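
A sketch of using the VolumeArgs builder above. Its Builder is package-private and its setters are not fluent, so the sketch lives in the same package and sets each field with a separate call.

    package org.apache.hadoop.ozone.om.helpers;

    public class VolumeArgsSketch {
      public static void main(String[] args) {
        VolumeArgs.Builder builder = new VolumeArgs.Builder();
        builder.setAdminName("hdfs");
        builder.setOwnerName("alice");
        builder.setVolume("vol1");
        builder.setQuotaInBytes(1024L * 1024 * 1024);  // 1 GB quota, illustrative
        builder.addMetadata("purpose", "demo");
        VolumeArgs volumeArgs = builder.build();
        System.out.println(volumeArgs.getVolume());
      }
    }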

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
new file mode 100644
index 0000000..b1211d8
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
new file mode 100644
index 0000000..1744cff
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+/**
+ This package contains the client-side protocol library used to communicate with OM.
+ */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
new file mode 100644
index 0000000..b7a099d
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -0,0 +1,252 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.protocol;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Protocol to talk to OM.
+ */
+public interface OzoneManagerProtocol {
+
+  /**
+   * Creates a volume.
+   * @param args - Arguments to create Volume.
+   * @throws IOException
+   */
+  void createVolume(OmVolumeArgs args) throws IOException;
+
+  /**
+   * Changes the owner of a volume.
+   * @param volume  - Name of the volume.
+   * @param owner - Name of the owner.
+   * @throws IOException
+   */
+  void setOwner(String volume, String owner) throws IOException;
+
+  /**
+   * Changes the Quota on a volume.
+   * @param volume - Name of the volume.
+   * @param quota - Quota in bytes.
+   * @throws IOException
+   */
+  void setQuota(String volume, long quota) throws IOException;
+
+  /**
+   * Checks if the specified user can access this volume.
+   * @param volume - volume
+   * @param userAcl - user acls which needs to be checked for access
+   * @return true if the user has required access for the volume,
+   *         false otherwise
+   * @throws IOException
+   */
+  boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
+      throws IOException;
+
+  /**
+   * Gets the volume information.
+   * @param volume - Volume name.
+   * @return VolumeArgs or exception is thrown.
+   * @throws IOException
+   */
+  OmVolumeArgs getVolumeInfo(String volume) throws IOException;
+
+  /**
+   * Deletes an existing empty volume.
+   * @param volume - Name of the volume.
+   * @throws IOException
+   */
+  void deleteVolume(String volume) throws IOException;
+
+  /**
+   * Lists volumes owned by a specific user.
+   * @param userName - user name
+   * @param prefix  - Filter prefix -- Return only entries that match this.
+   * @param prevKey - Previous key -- Listing starts from the key after this
+   *                  prevKey.
+   * @param maxKeys - Max number of keys to return.
+   * @return List of Volumes.
+   * @throws IOException
+   */
+  List<OmVolumeArgs> listVolumeByUser(String userName, String prefix, String
+      prevKey, int maxKeys) throws IOException;
+
+  /**
+   * Lists all volumes in the cluster.
+   * @param prefix  - Filter prefix -- Return only entries that match this.
+   * @param prevKey - Previous key -- Listing starts from the key after this
+   *                  prevKey.
+   * @param maxKeys - Max number of keys to return.
+   * @return List of Volumes.
+   * @throws IOException
+   */
+  List<OmVolumeArgs> listAllVolumes(String prefix, String
+      prevKey, int maxKeys) throws IOException;
+
+  /**
+   * Creates a bucket.
+   * @param bucketInfo - BucketInfo to create Bucket.
+   * @throws IOException
+   */
+  void createBucket(OmBucketInfo bucketInfo) throws IOException;
+
+  /**
+   * Gets the bucket information.
+   * @param volumeName - Volume name.
+   * @param bucketName - Bucket name.
+   * @return OmBucketInfo or exception is thrown.
+   * @throws IOException
+   */
+  OmBucketInfo getBucketInfo(String volumeName, String bucketName)
+      throws IOException;
+
+  /**
+   * Sets bucket property from args.
+   * @param args - BucketArgs.
+   * @throws IOException
+   */
+  void setBucketProperty(OmBucketArgs args) throws IOException;
+
+  /**
+   * Open the given key and return an open key session.
+   *
+   * @param args the args of the key.
+   * @return OpenKeySession instance that client uses to talk to container.
+   * @throws IOException
+   */
+  OpenKeySession openKey(OmKeyArgs args) throws IOException;
+
+  /**
+   * Commits a key. This makes the changes from the client visible. The client
+   * is identified by the clientID.
+   *
+   * @param args the key to commit
+   * @param clientID the client identification
+   * @throws IOException
+   */
+  void commitKey(OmKeyArgs args, int clientID) throws IOException;
+
+  /**
+   * Allocates a new block. It is assumed that the client has an open key
+   * session in progress; the allocated block will be appended to that session.
+   *
+   * @param args the key to append
+   * @param clientID the client identification
+   * @return an allocated block
+   * @throws IOException
+   */
+  OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID)
+      throws IOException;
+
+  /**
+   * Looks up the container of an existing key.
+   *
+   * @param args the args of the key.
+   * @return OmKeyInfo instance that client uses to talk to container.
+   * @throws IOException
+   */
+  OmKeyInfo lookupKey(OmKeyArgs args) throws IOException;
+
+  /**
+   * Renames an existing key within a bucket.
+   * @param args the args of the key.
+   * @param toKeyName New name to be used for the Key
+   */
+  void renameKey(OmKeyArgs args, String toKeyName) throws IOException;
+
+  /**
+   * Deletes an existing key.
+   *
+   * @param args the args of the key.
+   * @throws IOException
+   */
+  void deleteKey(OmKeyArgs args) throws IOException;
+
+  /**
+   * Deletes an existing empty bucket from volume.
+   * @param volume - Name of the volume.
+   * @param bucket - Name of the bucket.
+   * @throws IOException
+   */
+  void deleteBucket(String volume, String bucket) throws IOException;
+
+  /**
+   * Returns a list of buckets represented by {@link OmBucketInfo}
+   * in the given volume. Argument volumeName is required, others
+   * are optional.
+   *
+   * @param volumeName
+   *   the name of the volume.
+   * @param startBucketName
+   *   the start bucket name, only the buckets whose name is
+   *   after this value will be included in the result.
+   * @param bucketPrefix
+   *   bucket name prefix, only the buckets whose name has
+   *   this prefix will be included in the result.
+   * @param maxNumOfBuckets
+   *   the maximum number of buckets to return. It ensures
+   *   the size of the result will not exceed this limit.
+   * @return a list of buckets.
+   * @throws IOException
+   */
+  List<OmBucketInfo> listBuckets(String volumeName,
+                                 String startBucketName, String bucketPrefix, int maxNumOfBuckets)
+      throws IOException;
+
+  /**
+   * Returns a list of keys represented by {@link OmKeyInfo}
+   * in the given bucket. Argument volumeName, bucketName is required,
+   * others are optional.
+   *
+   * @param volumeName
+   *   the name of the volume.
+   * @param bucketName
+   *   the name of the bucket.
+   * @param startKeyName
+   *   the start key name, only the keys whose name is
+   *   after this value will be included in the result.
+   * @param keyPrefix
+   *   key name prefix, only the keys whose name has
+   *   this prefix will be included in the result.
+   * @param maxKeys
+   *   the maximum number of keys to return. It ensures
+   *   the size of the result will not exceed this limit.
+   * @return a list of keys.
+   * @throws IOException
+   */
+  List<OmKeyInfo> listKeys(String volumeName,
+                           String bucketName, String startKeyName, String keyPrefix, int maxKeys)
+      throws IOException;
+
+  /**
+   * Returns list of Ozone services with its configuration details.
+   *
+   * @return list of Ozone services
+   * @throws IOException
+   */
+  List<ServiceInfo> getServiceList() throws IOException;
+}
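
A sketch of the open/allocate/commit sequence the interface above defines for writing a key. How OmKeyArgs is populated is not part of this file, so it is taken as a parameter; the data transfer to the datanodes is only indicated by a comment.

    import java.io.IOException;

    import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
    import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
    import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
    import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;

    public class KeyWriteFlowSketch {
      static void writeKey(OzoneManagerProtocol om, OmKeyArgs keyArgs)
          throws IOException {
        OpenKeySession session = om.openKey(keyArgs);       // 1. open a key session
        OmKeyLocationInfo block =
            om.allocateBlock(keyArgs, session.getId());     // 2. ask for a block to write to
        // ... write the data to the datanodes that host 'block' ...
        om.commitKey(keyArgs, session.getId());             // 3. make the key visible
      }
    }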

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
new file mode 100644
index 0000000..9c7f388
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
@@ -0,0 +1,19 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.protocol;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
new file mode 100644
index 0000000..37151fb
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -0,0 +1,769 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.protocolPB;
+
+import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtocolTranslator;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.AllocateBlockRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.AllocateBlockResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CommitKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CommitKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.BucketArgs;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetBucketPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetBucketPropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.LocateKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.LocateKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.RenameKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.RenameKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetVolumePropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetVolumePropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CheckVolumeAccessRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CheckVolumeAccessResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListBucketsRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListBucketsResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListKeysRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListKeysResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.Status;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ServiceListRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ServiceListResponse;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.stream.Collectors;
+
+/**
+ *  The client side implementation of OzoneManagerProtocol.
+ */
+
+@InterfaceAudience.Private
+public final class OzoneManagerProtocolClientSideTranslatorPB
+    implements OzoneManagerProtocol, ProtocolTranslator, Closeable {
+
+  /**
+   * RpcController is not used and hence is set to null.
+   */
+  private static final RpcController NULL_RPC_CONTROLLER = null;
+
+  private final OzoneManagerProtocolPB rpcProxy;
+
+  /**
+   * Constructor for the Ozone Manager protocol client.
+   * @param rpcProxy the server-side RPC proxy this translator wraps
+   */
+  public OzoneManagerProtocolClientSideTranslatorPB(
+      OzoneManagerProtocolPB rpcProxy) {
+    this.rpcProxy = rpcProxy;
+  }
+
+  /**
+   * Closes this stream and releases any system resources associated
+   * with it. If the stream is already closed then invoking this
+   * method has no effect.
+   * <p>
+   * <p> As noted in {@link AutoCloseable#close()}, cases where the
+   * close may fail require careful attention. It is strongly advised
+   * to relinquish the underlying resources and to internally
+   * <em>mark</em> the {@code Closeable} as closed, prior to throwing
+   * the {@code IOException}.
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  @Override
+  public void close() throws IOException {
+
+  }
+
+  /**
+   * Creates a volume.
+   *
+   * @param args - Arguments to create Volume.
+   * @throws IOException
+   */
+  @Override
+  public void createVolume(OmVolumeArgs args) throws IOException {
+    CreateVolumeRequest.Builder req =
+        CreateVolumeRequest.newBuilder();
+    VolumeInfo volumeInfo = args.getProtobuf();
+    req.setVolumeInfo(volumeInfo);
+
+    final CreateVolumeResponse resp;
+    try {
+      resp = rpcProxy.createVolume(NULL_RPC_CONTROLLER,
+          req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Volume creation failed, error:" + resp.getStatus());
+    }
+  }
+
+  /**
+   * Changes the owner of a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param owner - Name of the owner.
+   * @throws IOException
+   */
+  @Override
+  public void setOwner(String volume, String owner) throws IOException {
+    SetVolumePropertyRequest.Builder req =
+        SetVolumePropertyRequest.newBuilder();
+    req.setVolumeName(volume).setOwnerName(owner);
+    final SetVolumePropertyResponse resp;
+    try {
+      resp = rpcProxy.setVolumeProperty(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Volume owner change failed, error:" + resp.getStatus());
+    }
+  }
+
+  /**
+   * Changes the Quota on a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param quota - Quota in bytes.
+   * @throws IOException
+   */
+  @Override
+  public void setQuota(String volume, long quota) throws IOException {
+    SetVolumePropertyRequest.Builder req =
+        SetVolumePropertyRequest.newBuilder();
+    req.setVolumeName(volume).setQuotaInBytes(quota);
+    final SetVolumePropertyResponse resp;
+    try {
+      resp = rpcProxy.setVolumeProperty(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Volume quota change failed, error:" + resp.getStatus());
+    }
+  }
+
+  /**
+   * Checks if the specified user can access this volume.
+   *
+   * @param volume - volume
+   * @param userAcl - user acls which needs to be checked for access
+   * @return true if the user has required access for the volume,
+   *         false otherwise
+   * @throws IOException
+   */
+  @Override
+  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) throws
+      IOException {
+    CheckVolumeAccessRequest.Builder req =
+        CheckVolumeAccessRequest.newBuilder();
+    req.setVolumeName(volume).setUserAcl(userAcl);
+    final CheckVolumeAccessResponse resp;
+    try {
+      resp = rpcProxy.checkVolumeAccess(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+    if (resp.getStatus() == Status.ACCESS_DENIED) {
+      return false;
+    } else if (resp.getStatus() == Status.OK) {
+      return true;
+    } else {
+      throw new
+          IOException("Check Volume Access failed, error:" + resp.getStatus());
+    }
+  }
+
+  /**
+   * Gets the volume information.
+   *
+   * @param volume - Volume name.
+   * @return OmVolumeArgs or exception is thrown.
+   * @throws IOException
+   */
+  @Override
+  public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
+    InfoVolumeRequest.Builder req = InfoVolumeRequest.newBuilder();
+    req.setVolumeName(volume);
+    final InfoVolumeResponse resp;
+    try {
+      resp = rpcProxy.infoVolume(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Info Volume failed, error:" + resp.getStatus());
+    }
+    return OmVolumeArgs.getFromProtobuf(resp.getVolumeInfo());
+  }
+
+  /**
+   * Deletes an existing empty volume.
+   *
+   * @param volume - Name of the volume.
+   * @throws IOException
+   */
+  @Override
+  public void deleteVolume(String volume) throws IOException {
+    DeleteVolumeRequest.Builder req = DeleteVolumeRequest.newBuilder();
+    req.setVolumeName(volume);
+    final DeleteVolumeResponse resp;
+    try {
+      resp = rpcProxy.deleteVolume(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Delete Volume failed, error:" + resp.getStatus());
+    }
+  }
+
+  /**
+   * Lists volume owned by a specific user.
+   *
+   * @param userName - user name
+   * @param prefix - Filter prefix -- Return only entries that match this.
+   * @param prevKey - Previous key -- Listing starts from the key after this
+   * prevKey.
+   * @param maxKeys - Max number of keys to return.
+   * @return List of Volumes.
+   * @throws IOException
+   */
+  @Override
+  public List<OmVolumeArgs> listVolumeByUser(String userName, String prefix,
+                                             String prevKey, int maxKeys)
+      throws IOException {
+    ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder();
+    if (!Strings.isNullOrEmpty(prefix)) {
+      builder.setPrefix(prefix);
+    }
+    if (!Strings.isNullOrEmpty(prevKey)) {
+      builder.setPrevKey(prevKey);
+    }
+    builder.setMaxKeys(maxKeys);
+    builder.setUserName(userName);
+    builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_USER);
+    return listVolume(builder.build());
+  }
+
+  /**
+   * Lists all volumes in the cluster.
+   *
+   * @param prefix - Filter prefix -- Return only entries that match this.
+   * @param prevKey - Previous key -- Listing starts from the key after this
+   * prevKey.
+   * @param maxKeys - Max number of keys to return.
+   * @return List of Volumes.
+   * @throws IOException
+   */
+  @Override
+  public List<OmVolumeArgs> listAllVolumes(String prefix, String prevKey,
+                                           int maxKeys) throws IOException {
+    ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder();
+    if (!Strings.isNullOrEmpty(prefix)) {
+      builder.setPrefix(prefix);
+    }
+    if (!Strings.isNullOrEmpty(prevKey)) {
+      builder.setPrevKey(prevKey);
+    }
+    builder.setMaxKeys(maxKeys);
+    builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER);
+    return listVolume(builder.build());
+  }
+
+  private List<OmVolumeArgs> listVolume(ListVolumeRequest request)
+      throws IOException {
+    final ListVolumeResponse resp;
+    try {
+      resp = rpcProxy.listVolumes(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("List volume failed, error: "
+          + resp.getStatus());
+    }
+
+    // Convert each VolumeInfo in the response to the client-side OmVolumeArgs.
+    List<OmVolumeArgs> result = Lists.newArrayList();
+    for (VolumeInfo volInfo : resp.getVolumeInfoList()) {
+      result.add(OmVolumeArgs.getFromProtobuf(volInfo));
+    }
+
+    return result;
+  }
+
+  /**
+   * Creates a bucket.
+   *
+   * @param bucketInfo - BucketInfo to create bucket.
+   * @throws IOException
+   */
+  @Override
+  public void createBucket(OmBucketInfo bucketInfo) throws IOException {
+    CreateBucketRequest.Builder req =
+        CreateBucketRequest.newBuilder();
+    BucketInfo bucketInfoProtobuf = bucketInfo.getProtobuf();
+    req.setBucketInfo(bucketInfoProtobuf);
+
+    final CreateBucketResponse resp;
+    try {
+      resp = rpcProxy.createBucket(NULL_RPC_CONTROLLER,
+          req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Bucket creation failed, error: "
+          + resp.getStatus());
+    }
+  }
+
+  /**
+   * Gets the bucket information.
+   *
+   * @param volume - Volume name.
+   * @param bucket - Bucket name.
+   * @return OmBucketInfo or exception is thrown.
+   * @throws IOException
+   */
+  @Override
+  public OmBucketInfo getBucketInfo(String volume, String bucket)
+      throws IOException {
+    InfoBucketRequest.Builder req =
+        InfoBucketRequest.newBuilder();
+    req.setVolumeName(volume);
+    req.setBucketName(bucket);
+
+    final InfoBucketResponse resp;
+    try {
+      resp = rpcProxy.infoBucket(NULL_RPC_CONTROLLER,
+          req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() == Status.OK) {
+      return OmBucketInfo.getFromProtobuf(resp.getBucketInfo());
+    } else {
+      throw new IOException("Info Bucket failed, error: "
+          + resp.getStatus());
+    }
+  }
+
+  /**
+   * Sets bucket property from args.
+   * @param args - BucketArgs.
+   * @throws IOException
+   */
+  @Override
+  public void setBucketProperty(OmBucketArgs args)
+      throws IOException {
+    SetBucketPropertyRequest.Builder req =
+        SetBucketPropertyRequest.newBuilder();
+    BucketArgs bucketArgs = args.getProtobuf();
+    req.setBucketArgs(bucketArgs);
+    final SetBucketPropertyResponse resp;
+    try {
+      resp = rpcProxy.setBucketProperty(NULL_RPC_CONTROLLER,
+          req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Setting bucket property failed, error: "
+          + resp.getStatus());
+    }
+  }
+
+  /**
+   * Lists buckets in a volume.
+   *
+   * @param volumeName name of the volume
+   * @param startKey only buckets whose name comes after this value are listed
+   * @param prefix only buckets whose name starts with this prefix are listed
+   * @param count maximum number of buckets to return
+   * @return list of buckets in the volume
+   * @throws IOException
+   */
+  @Override
+  public List<OmBucketInfo> listBuckets(String volumeName,
+                                        String startKey, String prefix, int count) throws IOException {
+    List<OmBucketInfo> buckets = new ArrayList<>();
+    ListBucketsRequest.Builder reqBuilder = ListBucketsRequest.newBuilder();
+    reqBuilder.setVolumeName(volumeName);
+    reqBuilder.setCount(count);
+    if (startKey != null) {
+      reqBuilder.setStartKey(startKey);
+    }
+    if (prefix != null) {
+      reqBuilder.setPrefix(prefix);
+    }
+    ListBucketsRequest request = reqBuilder.build();
+    final ListBucketsResponse resp;
+    try {
+      resp = rpcProxy.listBuckets(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+    if (resp.getStatus() == Status.OK) {
+      buckets.addAll(
+          resp.getBucketInfoList().stream()
+              .map(OmBucketInfo::getFromProtobuf)
+              .collect(Collectors.toList()));
+      return buckets;
+    } else {
+      throw new IOException("List Buckets failed, error: "
+          + resp.getStatus());
+    }
+  }
+
+  /**
+   * Creates a new open session for the key, then uses the returned meta info
+   * to talk to the datanodes to actually write the key.
+   * @param args the args for the key to be allocated
+   * @return a handle to the open key that the client uses for subsequent calls
+   * @throws IOException
+   */
+  @Override
+  public OpenKeySession openKey(OmKeyArgs args) throws IOException {
+    LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
+    KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
+        .setVolumeName(args.getVolumeName())
+        .setBucketName(args.getBucketName())
+        .setFactor(args.getFactor())
+        .setType(args.getType())
+        .setKeyName(args.getKeyName());
+    if (args.getDataSize() > 0) {
+      keyArgs.setDataSize(args.getDataSize());
+    }
+    req.setKeyArgs(keyArgs.build());
+
+    final LocateKeyResponse resp;
+    try {
+      resp = rpcProxy.createKey(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Create key failed, error:" + resp.getStatus());
+    }
+    return new OpenKeySession(resp.getID(),
+        OmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion());
+  }
+
+  @Override
+  public OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID)
+      throws IOException {
+    AllocateBlockRequest.Builder req = AllocateBlockRequest.newBuilder();
+    KeyArgs keyArgs = KeyArgs.newBuilder()
+        .setVolumeName(args.getVolumeName())
+        .setBucketName(args.getBucketName())
+        .setKeyName(args.getKeyName())
+        .setDataSize(args.getDataSize()).build();
+    req.setKeyArgs(keyArgs);
+    req.setClientID(clientID);
+
+    final AllocateBlockResponse resp;
+    try {
+      resp = rpcProxy.allocateBlock(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Allocate block failed, error:" +
+          resp.getStatus());
+    }
+    return OmKeyLocationInfo.getFromProtobuf(resp.getKeyLocation());
+  }
+
+  @Override
+  public void commitKey(OmKeyArgs args, int clientID)
+      throws IOException {
+    CommitKeyRequest.Builder req = CommitKeyRequest.newBuilder();
+    KeyArgs keyArgs = KeyArgs.newBuilder()
+        .setVolumeName(args.getVolumeName())
+        .setBucketName(args.getBucketName())
+        .setKeyName(args.getKeyName())
+        .setDataSize(args.getDataSize()).build();
+    req.setKeyArgs(keyArgs);
+    req.setClientID(clientID);
+
+    final CommitKeyResponse resp;
+    try {
+      resp = rpcProxy.commitKey(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Commit key failed, error:" +
+          resp.getStatus());
+    }
+  }
+
+
+  @Override
+  public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException {
+    LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
+    KeyArgs keyArgs = KeyArgs.newBuilder()
+        .setVolumeName(args.getVolumeName())
+        .setBucketName(args.getBucketName())
+        .setKeyName(args.getKeyName())
+        .setDataSize(args.getDataSize()).build();
+    req.setKeyArgs(keyArgs);
+
+    final LocateKeyResponse resp;
+    try {
+      resp = rpcProxy.lookupKey(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Lookup key failed, error:" +
+          resp.getStatus());
+    }
+    return OmKeyInfo.getFromProtobuf(resp.getKeyInfo());
+  }
+
+  @Override
+  public void renameKey(OmKeyArgs args, String toKeyName) throws IOException {
+    RenameKeyRequest.Builder req = RenameKeyRequest.newBuilder();
+    KeyArgs keyArgs = KeyArgs.newBuilder()
+        .setVolumeName(args.getVolumeName())
+        .setBucketName(args.getBucketName())
+        .setKeyName(args.getKeyName())
+        .setDataSize(args.getDataSize()).build();
+    req.setKeyArgs(keyArgs);
+    req.setToKeyName(toKeyName);
+
+    final RenameKeyResponse resp;
+    try {
+      resp = rpcProxy.renameKey(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Rename key failed, error:" +
+          resp.getStatus());
+    }
+  }
+
+  /**
+   * Deletes an existing key.
+   *
+   * @param args the args of the key.
+   * @throws IOException
+   */
+  @Override
+  public void deleteKey(OmKeyArgs args) throws IOException {
+    LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
+    KeyArgs keyArgs = KeyArgs.newBuilder()
+        .setVolumeName(args.getVolumeName())
+        .setBucketName(args.getBucketName())
+        .setKeyName(args.getKeyName()).build();
+    req.setKeyArgs(keyArgs);
+
+    final LocateKeyResponse resp;
+    try {
+      resp = rpcProxy.deleteKey(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Delete key failed, error:" +
+          resp.getStatus());
+    }
+  }
+
+  /**
+   * Deletes an existing empty bucket from volume.
+   * @param volume - Name of the volume.
+   * @param bucket - Name of the bucket.
+   * @throws IOException
+   */
+  public void deleteBucket(String volume, String bucket) throws IOException {
+    DeleteBucketRequest.Builder req = DeleteBucketRequest.newBuilder();
+    req.setVolumeName(volume);
+    req.setBucketName(bucket);
+    final DeleteBucketResponse resp;
+    try {
+      resp = rpcProxy.deleteBucket(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Delete Bucket failed, error:" + resp.getStatus());
+    }
+  }
+
+  /**
+   * List keys in a bucket.
+   */
+  @Override
+  public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
+                                  String startKey, String prefix, int maxKeys) throws IOException {
+    List<OmKeyInfo> keys = new ArrayList<>();
+    ListKeysRequest.Builder reqBuilder = ListKeysRequest.newBuilder();
+    reqBuilder.setVolumeName(volumeName);
+    reqBuilder.setBucketName(bucketName);
+    reqBuilder.setCount(maxKeys);
+
+    if (startKey != null) {
+      reqBuilder.setStartKey(startKey);
+    }
+
+    if (prefix != null) {
+      reqBuilder.setPrefix(prefix);
+    }
+
+    ListKeysRequest request = reqBuilder.build();
+    final ListKeysResponse resp;
+    try {
+      resp = rpcProxy.listKeys(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+    if (resp.getStatus() == Status.OK) {
+      keys.addAll(
+          resp.getKeyInfoList().stream()
+              .map(OmKeyInfo::getFromProtobuf)
+              .collect(Collectors.toList()));
+      return keys;
+    } else {
+      throw new IOException("List Keys failed, error: "
+          + resp.getStatus());
+    }
+  }
+
+  @Override
+  public List<ServiceInfo> getServiceList() throws IOException {
+    ServiceListRequest request = ServiceListRequest.newBuilder().build();
+    final ServiceListResponse resp;
+    try {
+      resp = rpcProxy.getServiceList(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+    if (resp.getStatus() == Status.OK) {
+      return resp.getServiceInfoList().stream()
+              .map(ServiceInfo::getFromProtobuf)
+              .collect(Collectors.toList());
+    } else {
+      throw new IOException("Getting service list failed, error: "
+          + resp.getStatus());
+    }
+  }
+
+  /**
+   * Return the proxy object underlying this protocol translator.
+   *
+   * @return the proxy object underlying this protocol translator.
+   */
+  @Override
+  public Object getUnderlyingProxyObject() {
+    return rpcProxy;
+  }
+}
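
A sketch of using the translator above once an OzoneManagerProtocolPB proxy is available. Creating the proxy itself goes through Hadoop's RPC framework and is not reproduced here; the volume arguments are taken as a parameter.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
    import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
    import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
    import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;

    public class OmClientSketch {
      static void useClient(OzoneManagerProtocolPB rpcProxy, OmVolumeArgs volumeArgs)
          throws IOException {
        OzoneManagerProtocolClientSideTranslatorPB om =
            new OzoneManagerProtocolClientSideTranslatorPB(rpcProxy);
        om.createVolume(volumeArgs);                 // translated into a CreateVolumeRequest
        List<ServiceInfo> services = om.getServiceList();
        System.out.println("OM reports " + services.size() + " service(s)");
      }
    }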

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
new file mode 100644
index 0000000..e0879d6
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.protocolPB;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneManagerService;
+
+/**
+ * Protocol used to communicate with OM.
+ */
+@ProtocolInfo(protocolName =
+    "org.apache.hadoop.ozone.protocol.OzoneManagerProtocol",
+    protocolVersion = 1)
+@InterfaceAudience.Private
+public interface OzoneManagerProtocolPB
+    extends OzoneManagerService.BlockingInterface {
+}
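
A sketch of how a client or server typically registers the protobuf RPC engine for this protocol before building a proxy or an RPC server; the actual proxy/server construction is omitted.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.ProtobufRpcEngine;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;

    public class OmRpcEngineSetupSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Tell Hadoop RPC to use the protobuf engine for this protocol.
        RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
            ProtobufRpcEngine.class);
        // A client would then obtain an OzoneManagerProtocolPB proxy via RPC
        // and wrap it in OzoneManagerProtocolClientSideTranslatorPB.
      }
    }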

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
new file mode 100644
index 0000000..d595edf
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
@@ -0,0 +1,19 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.protocolPB;
\ No newline at end of file




[18/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by bh...@apache.org.
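
Before the mechanical diff below, a small hedged sketch of what a caller looks like after the rename. Only the class and method names (OzoneManagerProtocolClientSideTranslatorPB, OmVolumeArgs, createVolume) come from this patch; the wrapper class, the helper method, and the default-quota behavior of the builder are assumptions.

import java.io.IOException;

import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;

/**
 * Illustrative sketch only: a caller of the renamed OM client API.
 * Formerly this used KeySpaceManagerProtocolClientSideTranslatorPB and
 * KsmVolumeArgs; the wrapper class and helper method are assumptions.
 */
public final class RenamedOmClientExample {

  private RenamedOmClientExample() {
  }

  public static void createVolume(
      OzoneManagerProtocolClientSideTranslatorPB omClient,
      String volume, String owner) throws IOException {
    OmVolumeArgs args = OmVolumeArgs.newBuilder()
        .setVolume(volume)
        .setOwnerName(owner)
        .setAdminName(owner)
        .build();
    omClient.createVolume(args);
  }
}

The same pattern runs through the whole diff: Ksm* helper types become Om*, KSM* configuration and exception classes become OM*, and keySpaceManagerClient fields become ozoneManagerClient.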
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
index fedc0f0..ec33990 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
@@ -22,14 +22,13 @@ import com.google.common.base.Strings;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.client.io.LengthInputStream;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.protocolPB
-    .KeySpaceManagerProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -37,9 +36,9 @@ import org.apache.hadoop.ozone.OzoneConsts.Versioning;
 import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
 import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocolPB.KSMPBHelper;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -77,8 +76,8 @@ public final class DistributedStorageHandler implements StorageHandler {
 
   private final StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
-  private final KeySpaceManagerProtocolClientSideTranslatorPB
-      keySpaceManagerClient;
+  private final OzoneManagerProtocolClientSideTranslatorPB
+      ozoneManagerClient;
   private final XceiverClientManager xceiverClientManager;
   private final OzoneAcl.OzoneACLRights userRights;
   private final OzoneAcl.OzoneACLRights groupRights;
@@ -92,14 +91,14 @@ public final class DistributedStorageHandler implements StorageHandler {
    *
    * @param conf configuration
    * @param storageContainerLocation StorageContainerLocationProtocol proxy
-   * @param keySpaceManagerClient KeySpaceManager proxy
+   * @param ozoneManagerClient OzoneManager proxy
    */
   public DistributedStorageHandler(OzoneConfiguration conf,
       StorageContainerLocationProtocolClientSideTranslatorPB
           storageContainerLocation,
-      KeySpaceManagerProtocolClientSideTranslatorPB
-          keySpaceManagerClient) {
-    this.keySpaceManagerClient = keySpaceManagerClient;
+      OzoneManagerProtocolClientSideTranslatorPB
+                                       ozoneManagerClient) {
+    this.ozoneManagerClient = ozoneManagerClient;
     this.storageContainerLocationClient = storageContainerLocation;
     this.xceiverClientManager = new XceiverClientManager(conf);
     this.useRatis = conf.getBoolean(
@@ -116,10 +115,10 @@ public final class DistributedStorageHandler implements StorageHandler {
 
     chunkSize = conf.getInt(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY,
         ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT);
-    userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS,
-        KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT);
-    groupRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS,
-        KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS_DEFAULT);
+    userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS,
+        OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT);
+    groupRights = conf.getEnum(OMConfigKeys.OZONE_OM_GROUP_RIGHTS,
+        OMConfigKeys.OZONE_OM_GROUP_RIGHTS_DEFAULT);
     if(chunkSize > ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE) {
       LOG.warn("The chunk size ({}) is not allowed to be more than"
               + " the maximum size ({}),"
@@ -136,26 +135,26 @@ public final class DistributedStorageHandler implements StorageHandler {
     OzoneAcl userAcl =
         new OzoneAcl(OzoneAcl.OzoneACLType.USER,
             args.getUserName(), userRights);
-    KsmVolumeArgs.Builder builder = KsmVolumeArgs.newBuilder();
+    OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder();
     builder.setAdminName(args.getAdminName())
         .setOwnerName(args.getUserName())
         .setVolume(args.getVolumeName())
         .setQuotaInBytes(quota)
-        .addOzoneAcls(KSMPBHelper.convertOzoneAcl(userAcl));
+        .addOzoneAcls(OMPBHelper.convertOzoneAcl(userAcl));
     if (args.getGroups() != null) {
       for (String group : args.getGroups()) {
         OzoneAcl groupAcl =
             new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights);
-        builder.addOzoneAcls(KSMPBHelper.convertOzoneAcl(groupAcl));
+        builder.addOzoneAcls(OMPBHelper.convertOzoneAcl(groupAcl));
       }
     }
-    keySpaceManagerClient.createVolume(builder.build());
+    ozoneManagerClient.createVolume(builder.build());
   }
 
   @Override
   public void setVolumeOwner(VolumeArgs args) throws
       IOException, OzoneException {
-    keySpaceManagerClient.setOwner(args.getVolumeName(), args.getUserName());
+    ozoneManagerClient.setOwner(args.getVolumeName(), args.getUserName());
   }
 
   @Override
@@ -163,14 +162,14 @@ public final class DistributedStorageHandler implements StorageHandler {
       throws IOException, OzoneException {
     long quota = remove ? OzoneConsts.MAX_QUOTA_IN_BYTES :
         args.getQuota().sizeInBytes();
-    keySpaceManagerClient.setQuota(args.getVolumeName(), quota);
+    ozoneManagerClient.setQuota(args.getVolumeName(), quota);
   }
 
   @Override
   public boolean checkVolumeAccess(String volume, OzoneAcl acl)
       throws IOException, OzoneException {
-    return keySpaceManagerClient
-        .checkVolumeAccess(volume, KSMPBHelper.convertOzoneAcl(acl));
+    return ozoneManagerClient
+        .checkVolumeAccess(volume, OMPBHelper.convertOzoneAcl(acl));
   }
 
   @Override
@@ -185,9 +184,9 @@ public final class DistributedStorageHandler implements StorageHandler {
               OzoneConsts.MAX_LISTVOLUMES_SIZE, maxNumOfKeys));
     }
 
-    List<KsmVolumeArgs> listResult;
+    List<OmVolumeArgs> listResult;
     if (args.isRootScan()) {
-      listResult = keySpaceManagerClient.listAllVolumes(args.getPrefix(),
+      listResult = ozoneManagerClient.listAllVolumes(args.getPrefix(),
           args.getPrevKey(), args.getMaxKeys());
     } else {
       UserArgs userArgs = args.getArgs();
@@ -195,16 +194,16 @@ public final class DistributedStorageHandler implements StorageHandler {
         throw new IllegalArgumentException("Illegal argument,"
             + " missing user argument.");
       }
-      listResult = keySpaceManagerClient.listVolumeByUser(
+      listResult = ozoneManagerClient.listVolumeByUser(
           args.getArgs().getUserName(), args.getPrefix(), args.getPrevKey(),
           args.getMaxKeys());
     }
 
     // TODO Add missing fields createdBy, bucketCount and bytesUsed
     ListVolumes result = new ListVolumes();
-    for (KsmVolumeArgs volumeArgs : listResult) {
+    for (OmVolumeArgs volumeArgs : listResult) {
       VolumeInfo info = new VolumeInfo();
-      KeySpaceManagerProtocolProtos.VolumeInfo
+      OzoneManagerProtocolProtos.VolumeInfo
           infoProto = volumeArgs.getProtobuf();
       info.setOwner(new VolumeOwner(infoProto.getOwnerName()));
       info.setQuota(OzoneQuota.getOzoneQuota(infoProto.getQuotaInBytes()));
@@ -220,14 +219,14 @@ public final class DistributedStorageHandler implements StorageHandler {
   @Override
   public void deleteVolume(VolumeArgs args)
       throws IOException, OzoneException {
-    keySpaceManagerClient.deleteVolume(args.getVolumeName());
+    ozoneManagerClient.deleteVolume(args.getVolumeName());
   }
 
   @Override
   public VolumeInfo getVolumeInfo(VolumeArgs args)
       throws IOException, OzoneException {
-    KsmVolumeArgs volumeArgs =
-        keySpaceManagerClient.getVolumeInfo(args.getVolumeName());
+    OmVolumeArgs volumeArgs =
+        ozoneManagerClient.getVolumeInfo(args.getVolumeName());
     //TODO: add support for createdOn and other fields in getVolumeInfo
     VolumeInfo volInfo =
         new VolumeInfo(volumeArgs.getVolume(), null,
@@ -242,7 +241,7 @@ public final class DistributedStorageHandler implements StorageHandler {
   @Override
   public void createBucket(final BucketArgs args)
       throws IOException, OzoneException {
-    KsmBucketInfo.Builder builder = KsmBucketInfo.newBuilder();
+    OmBucketInfo.Builder builder = OmBucketInfo.newBuilder();
     builder.setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName());
     if(args.getAddAcls() != null) {
@@ -255,7 +254,7 @@ public final class DistributedStorageHandler implements StorageHandler {
       builder.setIsVersionEnabled(getBucketVersioningProtobuf(
           args.getVersioning()));
     }
-    keySpaceManagerClient.createBucket(builder.build());
+    ozoneManagerClient.createBucket(builder.build());
   }
 
   /**
@@ -285,7 +284,7 @@ public final class DistributedStorageHandler implements StorageHandler {
     List<OzoneAcl> removeAcls = args.getRemoveAcls();
     List<OzoneAcl> addAcls = args.getAddAcls();
     if(removeAcls != null || addAcls != null) {
-      KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
+      OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
       builder.setVolumeName(args.getVolumeName())
           .setBucketName(args.getBucketName());
       if(removeAcls != null && !removeAcls.isEmpty()) {
@@ -294,35 +293,35 @@ public final class DistributedStorageHandler implements StorageHandler {
       if(addAcls != null && !addAcls.isEmpty()) {
         builder.setAddAcls(args.getAddAcls());
       }
-      keySpaceManagerClient.setBucketProperty(builder.build());
+      ozoneManagerClient.setBucketProperty(builder.build());
     }
   }
 
   @Override
   public void setBucketVersioning(BucketArgs args)
       throws IOException, OzoneException {
-    KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
+    OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
     builder.setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setIsVersionEnabled(getBucketVersioningProtobuf(
             args.getVersioning()));
-    keySpaceManagerClient.setBucketProperty(builder.build());
+    ozoneManagerClient.setBucketProperty(builder.build());
   }
 
   @Override
   public void setBucketStorageClass(BucketArgs args)
       throws IOException, OzoneException {
-    KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
+    OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
     builder.setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setStorageType(args.getStorageType());
-    keySpaceManagerClient.setBucketProperty(builder.build());
+    ozoneManagerClient.setBucketProperty(builder.build());
   }
 
   @Override
   public void deleteBucket(BucketArgs args)
       throws IOException, OzoneException {
-    keySpaceManagerClient.deleteBucket(args.getVolumeName(),
+    ozoneManagerClient.deleteBucket(args.getVolumeName(),
         args.getBucketName());
   }
 
@@ -354,12 +353,12 @@ public final class DistributedStorageHandler implements StorageHandler {
                 OzoneConsts.MAX_LISTBUCKETS_SIZE, maxNumOfKeys));
       }
 
-      List<KsmBucketInfo> buckets =
-          keySpaceManagerClient.listBuckets(va.getVolumeName(),
+      List<OmBucketInfo> buckets =
+          ozoneManagerClient.listBuckets(va.getVolumeName(),
               args.getPrevKey(), args.getPrefix(), args.getMaxKeys());
 
       // Convert the result for the web layer.
-      for (KsmBucketInfo bucketInfo : buckets) {
+      for (OmBucketInfo bucketInfo : buckets) {
         BucketInfo bk = new BucketInfo();
         bk.setVolumeName(bucketInfo.getVolumeName());
         bk.setBucketName(bucketInfo.getBucketName());
@@ -382,26 +381,26 @@ public final class DistributedStorageHandler implements StorageHandler {
       throws IOException {
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
-    KsmBucketInfo ksmBucketInfo = keySpaceManagerClient.getBucketInfo(
+    OmBucketInfo omBucketInfo = ozoneManagerClient.getBucketInfo(
         volumeName, bucketName);
-    BucketInfo bucketInfo = new BucketInfo(ksmBucketInfo.getVolumeName(),
-        ksmBucketInfo.getBucketName());
-    if(ksmBucketInfo.getIsVersionEnabled()) {
+    BucketInfo bucketInfo = new BucketInfo(omBucketInfo.getVolumeName(),
+        omBucketInfo.getBucketName());
+    if(omBucketInfo.getIsVersionEnabled()) {
       bucketInfo.setVersioning(Versioning.ENABLED);
     } else {
       bucketInfo.setVersioning(Versioning.DISABLED);
     }
-    bucketInfo.setStorageType(ksmBucketInfo.getStorageType());
-    bucketInfo.setAcls(ksmBucketInfo.getAcls());
+    bucketInfo.setStorageType(omBucketInfo.getStorageType());
+    bucketInfo.setAcls(omBucketInfo.getAcls());
     bucketInfo.setCreatedOn(
-        HddsClientUtils.formatDateTime(ksmBucketInfo.getCreationTime()));
+        HddsClientUtils.formatDateTime(omBucketInfo.getCreationTime()));
     return bucketInfo;
   }
 
   @Override
   public OutputStream newKeyWriter(KeyArgs args) throws IOException,
       OzoneException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName())
@@ -409,14 +408,14 @@ public final class DistributedStorageHandler implements StorageHandler {
         .setType(xceiverClientManager.getType())
         .setFactor(xceiverClientManager.getFactor())
         .build();
-    // contact KSM to allocate a block for key.
-    OpenKeySession openKey = keySpaceManagerClient.openKey(keyArgs);
+    // contact OM to allocate a block for key.
+    OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
     ChunkGroupOutputStream groupOutputStream =
         new ChunkGroupOutputStream.Builder()
             .setHandler(openKey)
             .setXceiverClientManager(xceiverClientManager)
             .setScmClient(storageContainerLocationClient)
-            .setKsmClient(keySpaceManagerClient)
+            .setOmClient(ozoneManagerClient)
             .setChunkSize(chunkSize)
             .setRequestID(args.getRequestID())
             .setType(xceiverClientManager.getType())
@@ -437,56 +436,56 @@ public final class DistributedStorageHandler implements StorageHandler {
   @Override
   public LengthInputStream newKeyReader(KeyArgs args) throws IOException,
       OzoneException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName())
         .setDataSize(args.getSize())
         .build();
-    KsmKeyInfo keyInfo = keySpaceManagerClient.lookupKey(keyArgs);
-    return ChunkGroupInputStream.getFromKsmKeyInfo(
+    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
+    return ChunkGroupInputStream.getFromOmKeyInfo(
         keyInfo, xceiverClientManager, storageContainerLocationClient,
         args.getRequestID());
   }
 
   @Override
   public void deleteKey(KeyArgs args) throws IOException, OzoneException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName())
         .build();
-    keySpaceManagerClient.deleteKey(keyArgs);
+    ozoneManagerClient.deleteKey(keyArgs);
   }
 
   @Override
   public void renameKey(KeyArgs args, String toKeyName)
       throws IOException, OzoneException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName())
         .build();
-    keySpaceManagerClient.renameKey(keyArgs, toKeyName);
+    ozoneManagerClient.renameKey(keyArgs, toKeyName);
   }
 
   @Override
   public KeyInfo getKeyInfo(KeyArgs args) throws IOException, OzoneException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName())
         .build();
 
-    KsmKeyInfo ksmKeyInfo = keySpaceManagerClient.lookupKey(keyArgs);
+    OmKeyInfo omKeyInfo = ozoneManagerClient.lookupKey(keyArgs);
     KeyInfo keyInfo = new KeyInfo();
     keyInfo.setVersion(0);
-    keyInfo.setKeyName(ksmKeyInfo.getKeyName());
-    keyInfo.setSize(ksmKeyInfo.getDataSize());
+    keyInfo.setKeyName(omKeyInfo.getKeyName());
+    keyInfo.setSize(omKeyInfo.getDataSize());
     keyInfo.setCreatedOn(
-        HddsClientUtils.formatDateTime(ksmKeyInfo.getCreationTime()));
+        HddsClientUtils.formatDateTime(omKeyInfo.getCreationTime()));
     keyInfo.setModifiedOn(
-        HddsClientUtils.formatDateTime(ksmKeyInfo.getModificationTime()));
+        HddsClientUtils.formatDateTime(omKeyInfo.getModificationTime()));
     return keyInfo;
   }
 
@@ -515,13 +514,13 @@ public final class DistributedStorageHandler implements StorageHandler {
                 OzoneConsts.MAX_LISTKEYS_SIZE, maxNumOfKeys));
       }
 
-      List<KsmKeyInfo> keys=
-          keySpaceManagerClient.listKeys(bucketArgs.getVolumeName(),
+      List<OmKeyInfo> keys=
+          ozoneManagerClient.listKeys(bucketArgs.getVolumeName(),
               bucketArgs.getBucketName(),
               args.getPrevKey(), args.getPrefix(), args.getMaxKeys());
 
       // Convert the result for the web layer.
-      for (KsmKeyInfo info : keys) {
+      for (OmKeyInfo info : keys) {
         KeyInfo tempInfo = new KeyInfo();
         tempInfo.setVersion(0);
         tempInfo.setKeyName(info.getKeyName());
@@ -547,7 +546,7 @@ public final class DistributedStorageHandler implements StorageHandler {
   @Override
   public void close() {
     IOUtils.cleanupWithLogger(LOG, xceiverClientManager);
-    IOUtils.cleanupWithLogger(LOG, keySpaceManagerClient);
+    IOUtils.cleanupWithLogger(LOG, ozoneManagerClient);
     IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java
deleted file mode 100644
index 6c75691..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * BucketManager handles all the bucket level operations.
- */
-public interface BucketManager {
-  /**
-   * Creates a bucket.
-   * @param bucketInfo - KsmBucketInfo for creating bucket.
-   */
-  void createBucket(KsmBucketInfo bucketInfo) throws IOException;
-  /**
-   * Returns Bucket Information.
-   * @param volumeName - Name of the Volume.
-   * @param bucketName - Name of the Bucket.
-   */
-  KsmBucketInfo getBucketInfo(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Sets bucket property from args.
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  void setBucketProperty(KsmBucketArgs args) throws IOException;
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   * @param volumeName - Name of the volume.
-   * @param bucketName - Name of the bucket.
-   * @throws IOException
-   */
-  void deleteBucket(String volumeName, String bucketName) throws IOException;
-
-  /**
-   * Returns a list of buckets represented by {@link KsmBucketInfo}
-   * in the given volume.
-   *
-   * @param volumeName
-   *   Required parameter volume name determines buckets in which volume
-   *   to return.
-   * @param startBucket
-   *   Optional start bucket name parameter indicating where to start
-   *   the bucket listing from, this key is excluded from the result.
-   * @param bucketPrefix
-   *   Optional start key parameter, restricting the response to buckets
-   *   that begin with the specified name.
-   * @param maxNumOfBuckets
-   *   The maximum number of buckets to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of buckets.
-   * @throws IOException
-   */
-  List<KsmBucketInfo> listBuckets(String volumeName,
-      String startBucket, String bucketPrefix, int maxNumOfBuckets)
-      throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManagerImpl.java
deleted file mode 100644
index 957a6d9..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManagerImpl.java
+++ /dev/null
@@ -1,315 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.util.Time;
-import org.iq80.leveldb.DBException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * KSM bucket manager.
- */
-public class BucketManagerImpl implements BucketManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BucketManagerImpl.class);
-
-  /**
-   * KSMMetadataManager is used for accessing KSM MetadataDB and ReadWriteLock.
-   */
-  private final KSMMetadataManager metadataManager;
-
-  /**
-   * Constructs BucketManager.
-   * @param metadataManager
-   */
-  public BucketManagerImpl(KSMMetadataManager metadataManager){
-    this.metadataManager = metadataManager;
-  }
-
-  /**
-   * MetadataDB is maintained in MetadataManager and shared between
-   * BucketManager and VolumeManager. (and also by KeyManager)
-   *
-   * BucketManager uses MetadataDB to store bucket level information.
-   *
-   * Keys used in BucketManager for storing data into MetadataDB
-   * for BucketInfo:
-   * {volume/bucket} -> bucketInfo
-   *
-   * Work flow of create bucket:
-   *
-   * -> Check if the Volume exists in metadataDB, if not throw
-   * VolumeNotFoundException.
-   * -> Else check if the Bucket exists in metadataDB, if so throw
-   * BucketExistException
-   * -> Else update MetadataDB with VolumeInfo.
-   */
-
-  /**
-   * Creates a bucket.
-   * @param bucketInfo - KsmBucketInfo.
-   */
-  @Override
-  public void createBucket(KsmBucketInfo bucketInfo) throws IOException {
-    Preconditions.checkNotNull(bucketInfo);
-    metadataManager.writeLock().lock();
-    String volumeName = bucketInfo.getVolumeName();
-    String bucketName = bucketInfo.getBucketName();
-    try {
-      byte[] volumeKey = metadataManager.getVolumeKey(volumeName);
-      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-
-      //Check if the volume exists
-      if (metadataManager.get(volumeKey) == null) {
-        LOG.debug("volume: {} not found ", volumeName);
-        throw new KSMException("Volume doesn't exist",
-            KSMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-      //Check if bucket already exists
-      if (metadataManager.get(bucketKey) != null) {
-        LOG.debug("bucket: {} already exists ", bucketName);
-        throw new KSMException("Bucket already exist",
-            KSMException.ResultCodes.FAILED_BUCKET_ALREADY_EXISTS);
-      }
-
-      KsmBucketInfo ksmBucketInfo = KsmBucketInfo.newBuilder()
-          .setVolumeName(bucketInfo.getVolumeName())
-          .setBucketName(bucketInfo.getBucketName())
-          .setAcls(bucketInfo.getAcls())
-          .setStorageType(bucketInfo.getStorageType())
-          .setIsVersionEnabled(bucketInfo.getIsVersionEnabled())
-          .setCreationTime(Time.now())
-          .build();
-      metadataManager.put(bucketKey, ksmBucketInfo.getProtobuf().toByteArray());
-
-      LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName);
-    } catch (IOException | DBException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Bucket creation failed for bucket:{} in volume:{}",
-            bucketName, volumeName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Returns Bucket Information.
-   *
-   * @param volumeName - Name of the Volume.
-   * @param bucketName - Name of the Bucket.
-   */
-  @Override
-  public KsmBucketInfo getBucketInfo(String volumeName, String bucketName)
-      throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
-    metadataManager.readLock().lock();
-    try {
-      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-      byte[] value = metadataManager.get(bucketKey);
-      if (value == null) {
-        LOG.debug("bucket: {} not found in volume: {}.", bucketName,
-            volumeName);
-        throw new KSMException("Bucket not found",
-            KSMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
-      }
-      return KsmBucketInfo.getFromProtobuf(BucketInfo.parseFrom(value));
-    } catch (IOException | DBException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Exception while getting bucket info for bucket: {}",
-            bucketName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-
-  /**
-   * Sets bucket property from args.
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  @Override
-  public void setBucketProperty(KsmBucketArgs args) throws IOException {
-    Preconditions.checkNotNull(args);
-    metadataManager.writeLock().lock();
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    try {
-      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-      //Check if volume exists
-      if(metadataManager.get(metadataManager.getVolumeKey(volumeName)) ==
-          null) {
-        LOG.debug("volume: {} not found ", volumeName);
-        throw new KSMException("Volume doesn't exist",
-            KSMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-      byte[] value = metadataManager.get(bucketKey);
-      //Check if bucket exist
-      if(value == null) {
-        LOG.debug("bucket: {} not found ", bucketName);
-        throw new KSMException("Bucket doesn't exist",
-            KSMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
-      }
-      KsmBucketInfo oldBucketInfo = KsmBucketInfo.getFromProtobuf(
-          BucketInfo.parseFrom(value));
-      KsmBucketInfo.Builder bucketInfoBuilder = KsmBucketInfo.newBuilder();
-      bucketInfoBuilder.setVolumeName(oldBucketInfo.getVolumeName())
-          .setBucketName(oldBucketInfo.getBucketName());
-
-      //Check ACLs to update
-      if(args.getAddAcls() != null || args.getRemoveAcls() != null) {
-        bucketInfoBuilder.setAcls(getUpdatedAclList(oldBucketInfo.getAcls(),
-            args.getRemoveAcls(), args.getAddAcls()));
-        LOG.debug("Updating ACLs for bucket: {} in volume: {}",
-            bucketName, volumeName);
-      } else {
-        bucketInfoBuilder.setAcls(oldBucketInfo.getAcls());
-      }
-
-      //Check StorageType to update
-      StorageType storageType = args.getStorageType();
-      if (storageType != null) {
-        bucketInfoBuilder.setStorageType(storageType);
-        LOG.debug("Updating bucket storage type for bucket: {} in volume: {}",
-            bucketName, volumeName);
-      } else {
-        bucketInfoBuilder.setStorageType(oldBucketInfo.getStorageType());
-      }
-
-      //Check Versioning to update
-      Boolean versioning = args.getIsVersionEnabled();
-      if (versioning != null) {
-        bucketInfoBuilder.setIsVersionEnabled(versioning);
-        LOG.debug("Updating bucket versioning for bucket: {} in volume: {}",
-            bucketName, volumeName);
-      } else {
-        bucketInfoBuilder
-            .setIsVersionEnabled(oldBucketInfo.getIsVersionEnabled());
-      }
-      bucketInfoBuilder.setCreationTime(oldBucketInfo.getCreationTime());
-
-      metadataManager.put(bucketKey,
-          bucketInfoBuilder.build().getProtobuf().toByteArray());
-    } catch (IOException | DBException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Setting bucket property failed for bucket:{} in volume:{}",
-            bucketName, volumeName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Updates the existing ACL list with remove and add ACLs that are passed.
-   * Remove is done before Add.
-   *
-   * @param existingAcls - old ACL list.
-   * @param removeAcls - ACLs to be removed.
-   * @param addAcls - ACLs to be added.
-   * @return updated ACL list.
-   */
-  private List<OzoneAcl> getUpdatedAclList(List<OzoneAcl> existingAcls,
-      List<OzoneAcl> removeAcls, List<OzoneAcl> addAcls) {
-    if(removeAcls != null && !removeAcls.isEmpty()) {
-      existingAcls.removeAll(removeAcls);
-    }
-    if(addAcls != null && !addAcls.isEmpty()) {
-      addAcls.stream().filter(acl -> !existingAcls.contains(acl)).forEach(
-          existingAcls::add);
-    }
-    return existingAcls;
-  }
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   * @param volumeName - Name of the volume.
-   * @param bucketName - Name of the bucket.
-   * @throws IOException
-   */
-  public void deleteBucket(String volumeName, String bucketName)
-      throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
-    metadataManager.writeLock().lock();
-    try {
-      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-      //Check if volume exists
-      if (metadataManager.get(metadataManager.getVolumeKey(volumeName))
-          == null) {
-        LOG.debug("volume: {} not found ", volumeName);
-        throw new KSMException("Volume doesn't exist",
-            KSMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-      //Check if bucket exist
-      if (metadataManager.get(bucketKey) == null) {
-        LOG.debug("bucket: {} not found ", bucketName);
-        throw new KSMException("Bucket doesn't exist",
-            KSMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
-      }
-      //Check if bucket is empty
-      if (!metadataManager.isBucketEmpty(volumeName, bucketName)) {
-        LOG.debug("bucket: {} is not empty ", bucketName);
-        throw new KSMException("Bucket is not empty",
-            KSMException.ResultCodes.FAILED_BUCKET_NOT_EMPTY);
-      }
-      metadataManager.delete(bucketKey);
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName,
-            volumeName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<KsmBucketInfo> listBuckets(String volumeName,
-      String startBucket, String bucketPrefix, int maxNumOfBuckets)
-      throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    metadataManager.readLock().lock();
-    try {
-      return metadataManager.listBuckets(
-          volumeName, startBucket, bucketPrefix, maxNumOfBuckets);
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java
deleted file mode 100644
index bf22332..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.server.ServiceRuntimeInfo;
-
-/**
- * This is the JMX management interface for ksm information.
- */
-@InterfaceAudience.Private
-public interface KSMMXBean extends ServiceRuntimeInfo {
-
-  String getRpcPort();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManager.java
deleted file mode 100644
index f5a2d5b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManager.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.utils.BatchOperation;
-import org.apache.hadoop.utils.MetadataStore;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.locks.Lock;
-
-/**
- * KSM metadata manager interface.
- */
-public interface KSMMetadataManager {
-  /**
-   * Start metadata manager.
-   */
-  void start();
-
-  /**
-   * Stop metadata manager.
-   */
-  void stop() throws IOException;
-
-  /**
-   * Get metadata store.
-   * @return metadata store.
-   */
-  @VisibleForTesting
-  MetadataStore getStore();
-
-  /**
-   * Returns the read lock used on Metadata DB.
-   * @return readLock
-   */
-  Lock readLock();
-
-  /**
-   * Returns the write lock used on Metadata DB.
-   * @return writeLock
-   */
-  Lock writeLock();
-
-  /**
-   * Returns the value associated with this key.
-   * @param key - key
-   * @return value
-   */
-  byte[] get(byte[] key) throws IOException;
-
-  /**
-   * Puts a Key into Metadata DB.
-   * @param key   - key
-   * @param value - value
-   */
-  void put(byte[] key, byte[] value) throws IOException;
-
-  /**
-   * Deletes a Key from Metadata DB.
-   * @param key   - key
-   */
-  void delete(byte[] key) throws IOException;
-
-  /**
-   * Atomic write a batch of operations.
-   * @param batch
-   * @throws IOException
-   */
-  void writeBatch(BatchOperation batch) throws IOException;
-
-  /**
-   * Given a volume return the corresponding DB key.
-   * @param volume - Volume name
-   */
-  byte[] getVolumeKey(String volume);
-
-  /**
-   * Given a user return the corresponding DB key.
-   * @param user - User name
-   */
-  byte[] getUserKey(String user);
-
-  /**
-   * Given a volume and bucket, return the corresponding DB key.
-   * @param volume - User name
-   * @param bucket - Bucket name
-   */
-  byte[] getBucketKey(String volume, String bucket);
-
-  /**
-   * Given a volume, bucket and a key, return the corresponding DB key.
-   * @param volume - volume name
-   * @param bucket - bucket name
-   * @param key - key name
-   * @return bytes of DB key.
-   */
-  byte[] getDBKeyBytes(String volume, String bucket, String key);
-
-  /**
-   * Returns the DB key name of a deleted key in KSM metadata store.
-   * The name for a deleted key has prefix #deleting# followed by
-   * the actual key name.
-   * @param keyName - key name
-   * @return bytes of DB key.
-   */
-  byte[] getDeletedKeyName(byte[] keyName);
-
-  /**
-   * Returns the DB key name of a open key in KSM metadata store.
-   * Should be #open# prefix followed by actual key name.
-   * @param keyName - key name
-   * @param id - the id for this open
-   * @return bytes of DB key.
-   */
-  byte[] getOpenKeyNameBytes(String keyName, int id);
-
-  /**
-   * Returns the full name of a key given volume name, bucket name and key name.
-   * Generally done by padding certain delimiters.
-   *
-   * @param volumeName - volume name
-   * @param bucketName - bucket name
-   * @param keyName - key name
-   * @return the full key name.
-   */
-  String getKeyWithDBPrefix(String volumeName, String bucketName,
-      String keyName);
-
-  /**
-   * Given a volume, check if it is empty,
-   * i.e there are no buckets inside it.
-   * @param volume - Volume name
-   */
-  boolean isVolumeEmpty(String volume) throws IOException;
-
-  /**
-   * Given a volume/bucket, check if it is empty,
-   * i.e there are no keys inside it.
-   * @param volume - Volume name
-   * @param  bucket - Bucket name
-   * @return true if the bucket is empty
-   */
-  boolean isBucketEmpty(String volume, String bucket) throws IOException;
-
-  /**
-   * Returns a list of buckets represented by {@link KsmBucketInfo}
-   * in the given volume.
-   *
-   * @param volumeName
-   *   the name of the volume. This argument is required,
-   *   this method returns buckets in this given volume.
-   * @param startBucket
-   *   the start bucket name. Only the buckets whose name is
-   *   after this value will be included in the result.
-   *   This key is excluded from the result.
-   * @param bucketPrefix
-   *   bucket name prefix. Only the buckets whose name has
-   *   this prefix will be included in the result.
-   * @param maxNumOfBuckets
-   *   the maximum number of buckets to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of buckets.
-   * @throws IOException
-   */
-  List<KsmBucketInfo> listBuckets(String volumeName, String startBucket,
-      String bucketPrefix, int maxNumOfBuckets) throws IOException;
-
-  /**
-   * Returns a list of keys represented by {@link KsmKeyInfo}
-   * in the given bucket.
-   *
-   * @param volumeName
-   *   the name of the volume.
-   * @param bucketName
-   *   the name of the bucket.
-   * @param startKey
-   *   the start key name, only the keys whose name is
-   *   after this value will be included in the result.
-   *   This key is excluded from the result.
-   * @param keyPrefix
-   *   key name prefix, only the keys whose name has
-   *   this prefix will be included in the result.
-   * @param maxKeys
-   *   the maximum number of keys to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of keys.
-   * @throws IOException
-   */
-  List<KsmKeyInfo> listKeys(String volumeName,
-      String bucketName, String startKey, String keyPrefix, int maxKeys)
-      throws IOException;
-
-  /**
-   * Returns a list of volumes owned by a given user; if user is null,
-   * returns all volumes.
-   *
-   * @param userName
-   *   volume owner
-   * @param prefix
-   *   the volume prefix used to filter the listing result.
-   * @param startKey
-   *   the start volume name determines where to start listing from,
-   *   this key is excluded from the result.
-   * @param maxKeys
-   *   the maximum number of volumes to return.
-   * @return a list of {@link KsmVolumeArgs}
-   * @throws IOException
-   */
-  List<KsmVolumeArgs> listVolumes(String userName, String prefix,
-      String startKey, int maxKeys) throws IOException;
-
-  /**
-   * Returns a list of pending deletion key info that ups to the given count.
-   * Each entry is a {@link BlockGroup}, which contains the info about the
-   * key name and all its associated block IDs. A pending deletion key is
-   * stored with #deleting# prefix in KSM DB.
-   *
-   * @param count max number of keys to return.
-   * @return a list of {@link BlockGroup} represent keys and blocks.
-   * @throws IOException
-   */
-  List<BlockGroup> getPendingDeletionKeys(int count) throws IOException;
-
-  /**
-   * Returns a list of all still open key info. Which contains the info about
-   * the key name and all its associated block IDs. A pending open key has
-   * prefix #open# in KSM DB.
-   *
-   * @return a list of {@link BlockGroup} representing keys and blocks.
-   * @throws IOException
-   */
-  List<BlockGroup> getExpiredOpenKeys() throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java
deleted file mode 100644
index 6664a32..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java
+++ /dev/null
@@ -1,526 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeList;
-
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BatchOperation;
-import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
-import org.apache.hadoop.utils.MetadataStore;
-import org.apache.hadoop.utils.MetadataStoreBuilder;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME;
-import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_ID_DELIMINATOR;
-import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_PREFIX;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_DB_CACHE_SIZE_MB;
-import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
-
-/**
- * KSM metadata manager interface.
- */
-public class KSMMetadataManagerImpl implements KSMMetadataManager {
-
-  private final MetadataStore store;
-  private final ReadWriteLock lock;
-  private final long openKeyExpireThresholdMS;
-
-  public KSMMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
-    File metaDir = getOzoneMetaDirPath(conf);
-    final int cacheSize = conf.getInt(OZONE_KSM_DB_CACHE_SIZE_MB,
-        OZONE_KSM_DB_CACHE_SIZE_DEFAULT);
-    File ksmDBFile = new File(metaDir.getPath(), KSM_DB_NAME);
-    this.store = MetadataStoreBuilder.newBuilder()
-        .setConf(conf)
-        .setDbFile(ksmDBFile)
-        .setCacheSize(cacheSize * OzoneConsts.MB)
-        .build();
-    this.lock = new ReentrantReadWriteLock();
-    this.openKeyExpireThresholdMS = 1000 * conf.getInt(
-        OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS,
-        OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT);
-  }
-
-  /**
-   * Start metadata manager.
-   */
-  @Override
-  public void start() {
-
-  }
-
-  /**
-   * Stop metadata manager.
-   */
-  @Override
-  public void stop() throws IOException {
-    if (store != null) {
-      store.close();
-    }
-  }
-
-  /**
-   * Get metadata store.
-   * @return store - metadata store.
-   */
-  @VisibleForTesting
-  @Override
-  public MetadataStore getStore() {
-    return store;
-  }
-
-  /**
-   * Given a volume return the corresponding DB key.
-   * @param volume - Volume name
-   */
-  public byte[] getVolumeKey(String volume) {
-    String dbVolumeName = OzoneConsts.KSM_VOLUME_PREFIX + volume;
-    return DFSUtil.string2Bytes(dbVolumeName);
-  }
-
-  /**
-   * Given a user return the corresponding DB key.
-   * @param user - User name
-   */
-  public byte[] getUserKey(String user) {
-    String dbUserName = OzoneConsts.KSM_USER_PREFIX + user;
-    return DFSUtil.string2Bytes(dbUserName);
-  }
-
-  /**
-   * Given a volume and bucket, return the corresponding DB key.
-   * @param volume - User name
-   * @param bucket - Bucket name
-   */
-  public byte[] getBucketKey(String volume, String bucket) {
-    String bucketKeyString = OzoneConsts.KSM_VOLUME_PREFIX + volume
-        + OzoneConsts.KSM_BUCKET_PREFIX + bucket;
-    return DFSUtil.string2Bytes(bucketKeyString);
-  }
-
-  /**
-   * @param volume
-   * @param bucket
-   * @return
-   */
-  private String getBucketWithDBPrefix(String volume, String bucket) {
-    StringBuffer sb = new StringBuffer();
-    sb.append(OzoneConsts.KSM_VOLUME_PREFIX)
-        .append(volume)
-        .append(OzoneConsts.KSM_BUCKET_PREFIX);
-    if (!Strings.isNullOrEmpty(bucket)) {
-      sb.append(bucket);
-    }
-    return sb.toString();
-  }
-
-  @Override
-  public String getKeyWithDBPrefix(String volume, String bucket, String key) {
-    String keyVB = OzoneConsts.KSM_KEY_PREFIX + volume
-        + OzoneConsts.KSM_KEY_PREFIX + bucket
-        + OzoneConsts.KSM_KEY_PREFIX;
-    return Strings.isNullOrEmpty(key) ? keyVB : keyVB + key;
-  }
-
-  @Override
-  public byte[] getDBKeyBytes(String volume, String bucket, String key) {
-    return DFSUtil.string2Bytes(getKeyWithDBPrefix(volume, bucket, key));
-  }
-
-  @Override
-  public byte[] getDeletedKeyName(byte[] keyName) {
-    return DFSUtil.string2Bytes(
-        DELETING_KEY_PREFIX + DFSUtil.bytes2String(keyName));
-  }
-
-  @Override
-  public byte[] getOpenKeyNameBytes(String keyName, int id) {
-    return DFSUtil.string2Bytes(OPEN_KEY_PREFIX + id +
-        OPEN_KEY_ID_DELIMINATOR + keyName);
-  }
-
-  /**
-   * Returns the read lock used on Metadata DB.
-   * @return readLock
-   */
-  @Override
-  public Lock readLock() {
-    return lock.readLock();
-  }
-
-  /**
-   * Returns the write lock used on Metadata DB.
-   * @return writeLock
-   */
-  @Override
-  public Lock writeLock() {
-    return lock.writeLock();
-  }
-
-  /**
-   * Returns the value associated with this key.
-   * @param key - key
-   * @return value
-   */
-  @Override
-  public byte[] get(byte[] key) throws IOException {
-    return store.get(key);
-  }
-
-  /**
-   * Puts a Key into Metadata DB.
-   * @param key   - key
-   * @param value - value
-   */
-  @Override
-  public void put(byte[] key, byte[] value) throws IOException {
-    store.put(key, value);
-  }
-
-  /**
-   * Deletes a Key from Metadata DB.
-   * @param key   - key
-   */
-  public void delete(byte[] key) throws IOException {
-    store.delete(key);
-  }
-
-  @Override
-  public void writeBatch(BatchOperation batch) throws IOException {
-    this.store.writeBatch(batch);
-  }
-
-  /**
-   * Given a volume, check if it is empty, i.e there are no buckets inside it.
-   * @param volume - Volume name
-   * @return true if the volume is empty
-   */
-  public boolean isVolumeEmpty(String volume) throws IOException {
-    String dbVolumeRootName = OzoneConsts.KSM_VOLUME_PREFIX + volume
-        + OzoneConsts.KSM_BUCKET_PREFIX;
-    byte[] dbVolumeRootKey = DFSUtil.string2Bytes(dbVolumeRootName);
-    ImmutablePair<byte[], byte[]> volumeRoot =
-        store.peekAround(0, dbVolumeRootKey);
-    if (volumeRoot != null) {
-      return !DFSUtil.bytes2String(volumeRoot.getKey())
-          .startsWith(dbVolumeRootName);
-    }
-    return true;
-  }
-
-  /**
-   * Given a volume/bucket, check if it is empty,
-   * i.e there are no keys inside it.
-   * @param volume - Volume name
-   * @param bucket - Bucket name
-   * @return true if the bucket is empty
-   */
-  public boolean isBucketEmpty(String volume, String bucket)
-      throws IOException {
-    String keyRootName = getKeyWithDBPrefix(volume, bucket, null);
-    byte[] keyRoot = DFSUtil.string2Bytes(keyRootName);
-    ImmutablePair<byte[], byte[]> firstKey = store.peekAround(0, keyRoot);
-    if (firstKey != null) {
-      return !DFSUtil.bytes2String(firstKey.getKey())
-          .startsWith(keyRootName);
-    }
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<KsmBucketInfo> listBuckets(final String volumeName,
-      final String startBucket, final String bucketPrefix,
-      final int maxNumOfBuckets) throws IOException {
-    List<KsmBucketInfo> result = new ArrayList<>();
-    if (Strings.isNullOrEmpty(volumeName)) {
-      throw new KSMException("Volume name is required.",
-          ResultCodes.FAILED_VOLUME_NOT_FOUND);
-    }
-
-    byte[] volumeNameBytes = getVolumeKey(volumeName);
-    if (store.get(volumeNameBytes) == null) {
-      throw new KSMException("Volume " + volumeName + " not found.",
-          ResultCodes.FAILED_VOLUME_NOT_FOUND);
-    }
-
-
-    // A bucket starts with /#volume/#bucket_prefix
-    MetadataKeyFilter filter = (preKey, currentKey, nextKey) -> {
-      if (currentKey != null) {
-        String bucketNamePrefix =
-                getBucketWithDBPrefix(volumeName, bucketPrefix);
-        String bucket = DFSUtil.bytes2String(currentKey);
-        return bucket.startsWith(bucketNamePrefix);
-      }
-      return false;
-    };
-
-    List<Map.Entry<byte[], byte[]>> rangeResult;
-    if (!Strings.isNullOrEmpty(startBucket)) {
-      // Since we are excluding start key from the result,
-      // the maxNumOfBuckets is incremented.
-      rangeResult = store.getSequentialRangeKVs(
-          getBucketKey(volumeName, startBucket),
-          maxNumOfBuckets + 1, filter);
-      if (!rangeResult.isEmpty()) {
-        //Remove start key from result.
-        rangeResult.remove(0);
-      }
-    } else {
-      rangeResult = store.getSequentialRangeKVs(null, maxNumOfBuckets, filter);
-    }
-
-    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
-      KsmBucketInfo info = KsmBucketInfo.getFromProtobuf(
-          BucketInfo.parseFrom(entry.getValue()));
-      result.add(info);
-    }
-    return result;
-  }
-
-  @Override
-  public List<KsmKeyInfo> listKeys(String volumeName, String bucketName,
-      String startKey, String keyPrefix, int maxKeys) throws IOException {
-    List<KsmKeyInfo> result = new ArrayList<>();
-    if (Strings.isNullOrEmpty(volumeName)) {
-      throw new KSMException("Volume name is required.",
-          ResultCodes.FAILED_VOLUME_NOT_FOUND);
-    }
-
-    if (Strings.isNullOrEmpty(bucketName)) {
-      throw new KSMException("Bucket name is required.",
-          ResultCodes.FAILED_BUCKET_NOT_FOUND);
-    }
-
-    byte[] bucketNameBytes = getBucketKey(volumeName, bucketName);
-    if (store.get(bucketNameBytes) == null) {
-      throw new KSMException("Bucket " + bucketName + " not found.",
-          ResultCodes.FAILED_BUCKET_NOT_FOUND);
-    }
-
-    MetadataKeyFilter filter = new KeyPrefixFilter()
-        .addFilter(getKeyWithDBPrefix(volumeName, bucketName, keyPrefix));
-
-    List<Map.Entry<byte[], byte[]>> rangeResult;
-    if (!Strings.isNullOrEmpty(startKey)) {
-      //Since we are excluding start key from the result,
-      // the maxNumOfBuckets is incremented.
-      rangeResult = store.getSequentialRangeKVs(
-          getDBKeyBytes(volumeName, bucketName, startKey),
-          maxKeys + 1, filter);
-      if (!rangeResult.isEmpty()) {
-        //Remove start key from result.
-        rangeResult.remove(0);
-      }
-    } else {
-      rangeResult = store.getSequentialRangeKVs(null, maxKeys, filter);
-    }
-
-    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
-      KsmKeyInfo info = KsmKeyInfo.getFromProtobuf(
-          KeyInfo.parseFrom(entry.getValue()));
-      result.add(info);
-    }
-    return result;
-  }
-
-  @Override
-  public List<KsmVolumeArgs> listVolumes(String userName,
-      String prefix, String startKey, int maxKeys) throws IOException {
-    List<KsmVolumeArgs> result = Lists.newArrayList();
-    VolumeList volumes;
-    if (Strings.isNullOrEmpty(userName)) {
-      volumes = getAllVolumes();
-    } else {
-      volumes = getVolumesByUser(userName);
-    }
-
-    if (volumes == null || volumes.getVolumeNamesCount() == 0) {
-      return result;
-    }
-
-    boolean startKeyFound = Strings.isNullOrEmpty(startKey);
-    for (String volumeName : volumes.getVolumeNamesList()) {
-      if (!Strings.isNullOrEmpty(prefix)) {
-        if (!volumeName.startsWith(prefix)) {
-          continue;
-        }
-      }
-
-      if (!startKeyFound && volumeName.equals(startKey)) {
-        startKeyFound = true;
-        continue;
-      }
-      if (startKeyFound && result.size() < maxKeys) {
-        byte[] volumeInfo = store.get(this.getVolumeKey(volumeName));
-        if (volumeInfo == null) {
-          // Could not get volume info by given volume name,
-          // since the volume name is loaded from db,
-          // this probably means ksm db is corrupted or some entries are
-          // accidentally removed.
-          throw new KSMException("Volume info not found for " + volumeName,
-              ResultCodes.FAILED_VOLUME_NOT_FOUND);
-        }
-        VolumeInfo info = VolumeInfo.parseFrom(volumeInfo);
-        KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(info);
-        result.add(volumeArgs);
-      }
-    }
-
-    return result;
-  }
-
-  private VolumeList getVolumesByUser(String userName)
-      throws KSMException {
-    return getVolumesByUser(getUserKey(userName));
-  }
-
-  private VolumeList getVolumesByUser(byte[] userNameKey)
-      throws KSMException {
-    VolumeList volumes = null;
-    try {
-      byte[] volumesInBytes = store.get(userNameKey);
-      if (volumesInBytes == null) {
-        // No volume found for this user, return an empty list
-        return VolumeList.newBuilder().build();
-      }
-      volumes = VolumeList.parseFrom(volumesInBytes);
-    } catch (IOException e) {
-      throw new KSMException("Unable to get volumes info by the given user, "
-          + "metadata might be corrupted", e,
-          ResultCodes.FAILED_METADATA_ERROR);
-    }
-    return volumes;
-  }
-
-  private VolumeList getAllVolumes() throws IOException {
-    // Scan all users in database
-    KeyPrefixFilter filter =
-        new KeyPrefixFilter().addFilter(OzoneConsts.KSM_USER_PREFIX);
-    // We are not expecting a huge number of users per cluster,
-    // it should be fine to scan all users in db and return us a
-    // list of volume names in string per user.
-    List<Map.Entry<byte[], byte[]>> rangeKVs = store
-        .getSequentialRangeKVs(null, Integer.MAX_VALUE, filter);
-
-    VolumeList.Builder builder = VolumeList.newBuilder();
-    for (Map.Entry<byte[], byte[]> entry : rangeKVs) {
-      VolumeList volumes = this.getVolumesByUser(entry.getKey());
-      builder.addAllVolumeNames(volumes.getVolumeNamesList());
-    }
-
-    return builder.build();
-  }
-
-  @Override
-  public List<BlockGroup> getPendingDeletionKeys(final int count)
-      throws IOException {
-    List<BlockGroup> keyBlocksList = Lists.newArrayList();
-    List<Map.Entry<byte[], byte[]>> rangeResult =
-        store.getRangeKVs(null, count,
-            MetadataKeyFilters.getDeletingKeyFilter());
-    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
-      KsmKeyInfo info =
-          KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(entry.getValue()));
-      // Get block keys as a list.
-      KsmKeyLocationInfoGroup latest = info.getLatestVersionLocations();
-      if (latest == null) {
-        return Collections.emptyList();
-      }
-      List<BlockID> item = latest.getLocationList().stream()
-          .map(b->new BlockID(b.getContainerID(), b.getLocalID()))
-          .collect(Collectors.toList());
-      BlockGroup keyBlocks = BlockGroup.newBuilder()
-          .setKeyName(DFSUtil.bytes2String(entry.getKey()))
-          .addAllBlockIDs(item)
-          .build();
-      keyBlocksList.add(keyBlocks);
-    }
-    return keyBlocksList;
-  }
-
-  @Override
-  public List<BlockGroup> getExpiredOpenKeys() throws IOException {
-    List<BlockGroup> keyBlocksList = Lists.newArrayList();
-    long now = Time.now();
-    final MetadataKeyFilter openKeyFilter =
-        new KeyPrefixFilter().addFilter(OPEN_KEY_PREFIX);
-    List<Map.Entry<byte[], byte[]>> rangeResult =
-        store.getSequentialRangeKVs(null, Integer.MAX_VALUE,
-            openKeyFilter);
-    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
-      KsmKeyInfo info =
-          KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(entry.getValue()));
-      long lastModify = info.getModificationTime();
-      if (now - lastModify < this.openKeyExpireThresholdMS) {
-        // consider as may still be active, not hanging.
-        continue;
-      }
-      // Get block keys as a list.
-      List<BlockID> item = info.getLatestVersionLocations()
-          .getBlocksLatestVersionOnly().stream()
-          .map(b->new BlockID(b.getContainerID(), b.getLocalID()))
-          .collect(Collectors.toList());
-      BlockGroup keyBlocks = BlockGroup.newBuilder()
-          .setKeyName(DFSUtil.bytes2String(entry.getKey()))
-          .addAllBlockIDs(item)
-          .build();
-      keyBlocksList.add(keyBlocks);
-    }
-    return keyBlocksList;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java
deleted file mode 100644
index 8ee67c3..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java
+++ /dev/null
@@ -1,459 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-
-/**
- * This class is for maintaining KeySpaceManager statistics.
- */
-@InterfaceAudience.Private
-@Metrics(about="Key Space Manager Metrics", context="dfs")
-public class KSMMetrics {
-  private static final String SOURCE_NAME =
-      KSMMetrics.class.getSimpleName();
-
-  // KSM request type op metrics
-  private @Metric MutableCounterLong numVolumeOps;
-  private @Metric MutableCounterLong numBucketOps;
-  private @Metric MutableCounterLong numKeyOps;
-
-  // KSM op metrics
-  private @Metric MutableCounterLong numVolumeCreates;
-  private @Metric MutableCounterLong numVolumeUpdates;
-  private @Metric MutableCounterLong numVolumeInfos;
-  private @Metric MutableCounterLong numVolumeCheckAccesses;
-  private @Metric MutableCounterLong numBucketCreates;
-  private @Metric MutableCounterLong numVolumeDeletes;
-  private @Metric MutableCounterLong numBucketInfos;
-  private @Metric MutableCounterLong numBucketUpdates;
-  private @Metric MutableCounterLong numBucketDeletes;
-  private @Metric MutableCounterLong numKeyAllocate;
-  private @Metric MutableCounterLong numKeyLookup;
-  private @Metric MutableCounterLong numKeyRenames;
-  private @Metric MutableCounterLong numKeyDeletes;
-  private @Metric MutableCounterLong numBucketLists;
-  private @Metric MutableCounterLong numKeyLists;
-  private @Metric MutableCounterLong numVolumeLists;
-  private @Metric MutableCounterLong numKeyCommits;
-  private @Metric MutableCounterLong numAllocateBlockCalls;
-  private @Metric MutableCounterLong numGetServiceLists;
-
-  // Failure Metrics
-  private @Metric MutableCounterLong numVolumeCreateFails;
-  private @Metric MutableCounterLong numVolumeUpdateFails;
-  private @Metric MutableCounterLong numVolumeInfoFails;
-  private @Metric MutableCounterLong numVolumeDeleteFails;
-  private @Metric MutableCounterLong numBucketCreateFails;
-  private @Metric MutableCounterLong numVolumeCheckAccessFails;
-  private @Metric MutableCounterLong numBucketInfoFails;
-  private @Metric MutableCounterLong numBucketUpdateFails;
-  private @Metric MutableCounterLong numBucketDeleteFails;
-  private @Metric MutableCounterLong numKeyAllocateFails;
-  private @Metric MutableCounterLong numKeyLookupFails;
-  private @Metric MutableCounterLong numKeyRenameFails;
-  private @Metric MutableCounterLong numKeyDeleteFails;
-  private @Metric MutableCounterLong numBucketListFails;
-  private @Metric MutableCounterLong numKeyListFails;
-  private @Metric MutableCounterLong numVolumeListFails;
-  private @Metric MutableCounterLong numKeyCommitFails;
-  private @Metric MutableCounterLong numBlockAllocateCallFails;
-  private @Metric MutableCounterLong numGetServiceListFails;
-
-  public KSMMetrics() {
-  }
-
-  public static KSMMetrics create() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE_NAME,
-        "Key Space Manager Metrics",
-        new KSMMetrics());
-  }
-
-  public void incNumVolumeCreates() {
-    numVolumeOps.incr();
-    numVolumeCreates.incr();
-  }
-
-  public void incNumVolumeUpdates() {
-    numVolumeOps.incr();
-    numVolumeUpdates.incr();
-  }
-
-  public void incNumVolumeInfos() {
-    numVolumeOps.incr();
-    numVolumeInfos.incr();
-  }
-
-  public void incNumVolumeDeletes() {
-    numVolumeOps.incr();
-    numVolumeDeletes.incr();
-  }
-
-  public void incNumVolumeCheckAccesses() {
-    numVolumeOps.incr();
-    numVolumeCheckAccesses.incr();
-  }
-
-  public void incNumBucketCreates() {
-    numBucketOps.incr();
-    numBucketCreates.incr();
-  }
-
-  public void incNumBucketInfos() {
-    numBucketOps.incr();
-    numBucketInfos.incr();
-  }
-
-  public void incNumBucketUpdates() {
-    numBucketOps.incr();
-    numBucketUpdates.incr();
-  }
-
-  public void incNumBucketDeletes() {
-    numBucketOps.incr();
-    numBucketDeletes.incr();
-  }
-
-  public void incNumBucketLists() {
-    numBucketOps.incr();
-    numBucketLists.incr();
-  }
-
-  public void incNumKeyLists() {
-    numKeyOps.incr();
-    numKeyLists.incr();
-  }
-
-  public void incNumVolumeLists() {
-    numVolumeOps.incr();
-    numVolumeLists.incr();
-  }
-
-  public void incNumGetServiceLists() {
-    numGetServiceLists.incr();
-  }
-
-  public void incNumVolumeCreateFails() {
-    numVolumeCreateFails.incr();
-  }
-
-  public void incNumVolumeUpdateFails() {
-    numVolumeUpdateFails.incr();
-  }
-
-  public void incNumVolumeInfoFails() {
-    numVolumeInfoFails.incr();
-  }
-
-  public void incNumVolumeDeleteFails() {
-    numVolumeDeleteFails.incr();
-  }
-
-  public void incNumVolumeCheckAccessFails() {
-    numVolumeCheckAccessFails.incr();
-  }
-
-  public void incNumBucketCreateFails() {
-    numBucketCreateFails.incr();
-  }
-
-  public void incNumBucketInfoFails() {
-    numBucketInfoFails.incr();
-  }
-
-  public void incNumBucketUpdateFails() {
-    numBucketUpdateFails.incr();
-  }
-
-  public void incNumBucketDeleteFails() {
-    numBucketDeleteFails.incr();
-  }
-
-  public void incNumKeyAllocates() {
-    numKeyOps.incr();
-    numKeyAllocate.incr();
-  }
-
-  public void incNumKeyAllocateFails() {
-    numKeyAllocateFails.incr();
-  }
-
-  public void incNumKeyLookups() {
-    numKeyOps.incr();
-    numKeyLookup.incr();
-  }
-
-  public void incNumKeyLookupFails() {
-    numKeyLookupFails.incr();
-  }
-
-  public void incNumKeyRenames() {
-    numKeyOps.incr();
-    numKeyRenames.incr();
-  }
-
-  public void incNumKeyRenameFails() {
-    numKeyOps.incr();
-    numKeyRenameFails.incr();
-  }
-
-  public void incNumKeyDeleteFails() {
-    numKeyDeleteFails.incr();
-  }
-
-  public void incNumKeyDeletes() {
-    numKeyOps.incr();
-    numKeyDeletes.incr();
-  }
-
-  public void incNumKeyCommits() {
-    numKeyOps.incr();
-    numKeyCommits.incr();
-  }
-
-  public void incNumKeyCommitFails() {
-    numKeyCommitFails.incr();
-  }
-
-  public void incNumBlockAllocateCalls() {
-    numAllocateBlockCalls.incr();
-  }
-
-  public void incNumBlockAllocateCallFails() {
-    numBlockAllocateCallFails.incr();
-  }
-
-  public void incNumBucketListFails() {
-    numBucketListFails.incr();
-  }
-
-  public void incNumKeyListFails() {
-    numKeyListFails.incr();
-  }
-
-  public void incNumVolumeListFails() {
-    numVolumeListFails.incr();
-  }
-
-  public void incNumGetServiceListFails() {
-    numGetServiceListFails.incr();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeCreates() {
-    return numVolumeCreates.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeUpdates() {
-    return numVolumeUpdates.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeInfos() {
-    return numVolumeInfos.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeDeletes() {
-    return numVolumeDeletes.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeCheckAccesses() {
-    return numVolumeCheckAccesses.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketCreates() {
-    return numBucketCreates.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketInfos() {
-    return numBucketInfos.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketUpdates() {
-    return numBucketUpdates.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketDeletes() {
-    return numBucketDeletes.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketLists() {
-    return numBucketLists.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeLists() {
-    return numVolumeLists.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyLists() {
-    return numKeyLists.value();
-  }
-
-  @VisibleForTesting
-  public long getNumGetServiceLists() {
-    return numGetServiceLists.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeCreateFails() {
-    return numVolumeCreateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeUpdateFails() {
-    return numVolumeUpdateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeInfoFails() {
-    return numVolumeInfoFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeDeleteFails() {
-    return numVolumeDeleteFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeCheckAccessFails() {
-    return numVolumeCheckAccessFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketCreateFails() {
-    return numBucketCreateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketInfoFails() {
-    return numBucketInfoFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketUpdateFails() {
-    return numBucketUpdateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketDeleteFails() {
-    return numBucketDeleteFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyAllocates() {
-    return numKeyAllocate.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyAllocateFails() {
-    return numKeyAllocateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyLookups() {
-    return numKeyLookup.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyLookupFails() {
-    return numKeyLookupFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyRenames() {
-    return numKeyRenames.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyRenameFails() {
-    return numKeyRenameFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyDeletes() {
-    return numKeyDeletes.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyDeletesFails() {
-    return numKeyDeleteFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketListFails() {
-    return numBucketListFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyListFails() {
-    return numKeyListFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeListFails() {
-    return numVolumeListFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyCommits() {
-    return numKeyCommits.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyCommitFails() {
-    return numKeyCommitFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBlockAllocates() {
-    return numAllocateBlockCalls.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBlockAllocateFails() {
-    return numBlockAllocateCallFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumGetServiceListFails() {
-    return numGetServiceListFails.value();
-  }
-
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(SOURCE_NAME);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java
deleted file mode 100644
index 015bed6..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import java.io.IOException;
-import java.util.Properties;
-import java.util.UUID;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.common.Storage;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-
-import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID;
-import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
-
-/**
- * KSMStorage is responsible for management of the StorageDirectories used by
- * the KSM.
- */
-public class KSMStorage extends Storage {
-
-  public static final String STORAGE_DIR = "ksm";
-  public static final String KSM_ID = "ksmUuid";
-
-  /**
-   * Construct KSMStorage.
-   * @throws IOException if any directories are inaccessible.
-   */
-  public KSMStorage(OzoneConfiguration conf) throws IOException {
-    super(NodeType.KSM, getOzoneMetaDirPath(conf), STORAGE_DIR);
-  }
-
-  public void setScmId(String scmId) throws IOException {
-    if (getState() == StorageState.INITIALIZED) {
-      throw new IOException("KSM is already initialized.");
-    } else {
-      getStorageInfo().setProperty(SCM_ID, scmId);
-    }
-  }
-
-  public void setKsmId(String ksmId) throws IOException {
-    if (getState() == StorageState.INITIALIZED) {
-      throw new IOException("KSM is already initialized.");
-    } else {
-      getStorageInfo().setProperty(KSM_ID, ksmId);
-    }
-  }
-
-  /**
-   * Retrieves the SCM ID from the version file.
-   * @return SCM_ID
-   */
-  public String getScmId() {
-    return getStorageInfo().getProperty(SCM_ID);
-  }
-
-  /**
-   * Retrieves the KSM ID from the version file.
-   * @return KSM_ID
-   */
-  public String getKsmId() {
-    return getStorageInfo().getProperty(KSM_ID);
-  }
-
-  @Override
-  protected Properties getNodeProperties() {
-    String ksmId = getKsmId();
-    if (ksmId == null) {
-      ksmId = UUID.randomUUID().toString();
-    }
-    Properties ksmProperties = new Properties();
-    ksmProperties.setProperty(KSM_ID, ksmId);
-    return ksmProperties;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java
deleted file mode 100644
index e51ab28..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BackgroundService;
-import org.apache.hadoop.utils.BackgroundTask;
-import org.apache.hadoop.utils.BackgroundTaskQueue;
-import org.apache.hadoop.utils.BackgroundTaskResult;
-import org.apache.hadoop.utils.BackgroundTaskResult.EmptyTaskResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT;
-
-/**
- * This is the background service to delete keys.
- * Scan the metadata of ksm periodically to get
- * the keys with prefix "#deleting" and ask scm to
- * delete metadata accordingly, if scm returns
- * success for keys, then clean up those keys.
- */
-public class KeyDeletingService extends BackgroundService {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(KeyDeletingService.class);
-
-  // The thread pool size for key deleting service.
-  private final static int KEY_DELETING_CORE_POOL_SIZE = 2;
-
-  private final ScmBlockLocationProtocol scmClient;
-  private final KeyManager manager;
-  private final int keyLimitPerTask;
-
-  public KeyDeletingService(ScmBlockLocationProtocol scmClient,
-      KeyManager manager, long serviceInterval,
-      long serviceTimeout, Configuration conf) {
-    super("KeyDeletingService", serviceInterval, TimeUnit.MILLISECONDS,
-        KEY_DELETING_CORE_POOL_SIZE, serviceTimeout);
-    this.scmClient = scmClient;
-    this.manager = manager;
-    this.keyLimitPerTask = conf.getInt(OZONE_KEY_DELETING_LIMIT_PER_TASK,
-        OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT);
-  }
-
-  @Override
-  public BackgroundTaskQueue getTasks() {
-    BackgroundTaskQueue queue = new BackgroundTaskQueue();
-    queue.add(new KeyDeletingTask());
-    return queue;
-  }
-
-  /**
-   * A key deleting task scans KSM DB and looking for a certain number
-   * of pending-deletion keys, sends these keys along with their associated
-   * blocks to SCM for deletion. Once SCM confirms keys are deleted (once
-   * SCM persisted the blocks info in its deletedBlockLog), it removes
-   * these keys from the DB.
-   */
-  private class KeyDeletingTask implements
-      BackgroundTask<BackgroundTaskResult> {
-
-    @Override
-    public int getPriority() {
-      return 0;
-    }
-
-    @Override
-    public BackgroundTaskResult call() throws Exception {
-      try {
-        long startTime = Time.monotonicNow();
-        List<BlockGroup> keyBlocksList = manager
-            .getPendingDeletionKeys(keyLimitPerTask);
-        if (keyBlocksList.size() > 0) {
-          LOG.info("Found {} to-delete keys in KSM", keyBlocksList.size());
-          List<DeleteBlockGroupResult> results =
-              scmClient.deleteKeyBlocks(keyBlocksList);
-          for (DeleteBlockGroupResult result : results) {
-            if (result.isSuccess()) {
-              try {
-                // Purge key from KSM DB.
-                manager.deletePendingDeletionKey(result.getObjectKey());
-                LOG.debug("Key {} deleted from KSM DB", result.getObjectKey());
-              } catch (IOException e) {
-                // if a pending deletion key is failed to delete,
-                // print a warning here and retain it in this state,
-                // so that it can be attempt to delete next time.
-                LOG.warn("Failed to delete pending-deletion key {}",
-                    result.getObjectKey(), e);
-              }
-            } else {
-              // Key deletion failed, retry in next interval.
-              LOG.warn("Key {} deletion failed because some of the blocks"
-                  + " were failed to delete, failed blocks: {}",
-                  result.getObjectKey(),
-                  StringUtils.join(",", result.getFailedBlocks()));
-            }
-          }
-
-          if (!results.isEmpty()) {
-            LOG.info("Number of key deleted from KSM DB: {},"
-                + " task elapsed time: {}ms",
-                results.size(), Time.monotonicNow() - startTime);
-          }
-
-          return results::size;
-        } else {
-          LOG.debug("No pending deletion key found in KSM");
-        }
-      } catch (IOException e) {
-        LOG.error("Unable to get pending deletion keys, retry in"
-            + " next interval", e);
-      }
-      return EmptyTaskResult.newResult();
-    }
-  }
-}

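For readers skimming the removed KeyDeletingService above, the pattern it implements is a periodic sweep: fetch a bounded batch of pending-deletion keys, ask the block service to delete their blocks, and purge only the keys whose deletion was confirmed. The following is a minimal, self-contained sketch of that pattern; the KeyStore and BlockClient interfaces are hypothetical stand-ins for illustration, not Ozone APIs.

import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/** Minimal sketch of a periodic pending-deletion sweep (hypothetical interfaces). */
public class PendingDeletionSweeper {

  /** Hypothetical view of the key store: list keys marked for deletion, purge one. */
  public interface KeyStore {
    List<String> getPendingDeletionKeys(int limit);
    void purge(String key);
  }

  /** Hypothetical client for the service that deletes the backing blocks. */
  public interface BlockClient {
    boolean deleteBlocksFor(String key);
  }

  private final ScheduledExecutorService executor =
      Executors.newSingleThreadScheduledExecutor();
  private final KeyStore store;
  private final BlockClient blocks;
  private final int keyLimitPerRun;

  public PendingDeletionSweeper(KeyStore store, BlockClient blocks, int keyLimitPerRun) {
    this.store = store;
    this.blocks = blocks;
    this.keyLimitPerRun = keyLimitPerRun;
  }

  /** Schedule the sweep at a fixed interval, mirroring the BackgroundService timer. */
  public void start(long intervalMillis) {
    executor.scheduleAtFixedRate(this::runOnce, 0, intervalMillis, TimeUnit.MILLISECONDS);
  }

  // One sweep: fetch a bounded batch, delete blocks, purge keys whose blocks are gone.
  private void runOnce() {
    for (String key : store.getPendingDeletionKeys(keyLimitPerRun)) {
      if (blocks.deleteBlocksFor(key)) {
        store.purge(key);   // block deletion confirmed, drop the key record
      }
      // on failure the key stays in the pending state and is retried next run
    }
  }

  public void stop() {
    executor.shutdownNow();
  }
}

As in the removed class, each run is bounded (there via OZONE_KEY_DELETING_LIMIT_PER_TASK) and keys that fail to delete remain in the pending state so the next interval retries them.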



[28/50] [abbrv] hadoop git commit: YARN-8302. ATS v2 should handle HBase connection issue properly. Contributed by Billie Rinaldi.

Posted by bh...@apache.org.
YARN-8302. ATS v2 should handle HBase connection issue properly. Contributed by Billie Rinaldi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba683204
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba683204
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba683204

Branch: refs/heads/HDDS-48
Commit: ba683204498c97654be4727ab9e128c433a45498
Parents: 0247cb6
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Fri Jul 6 15:19:01 2018 -0700
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Fri Jul 6 15:19:01 2018 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |   7 +
 .../storage/TestTimelineReaderHBaseDown.java    | 220 +++++++++++++++++++
 .../storage/HBaseTimelineReaderImpl.java        |  93 ++++++++
 3 files changed, 320 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba683204/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 5842d64..9156c2d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3659,6 +3659,13 @@ public class YarnConfiguration extends Configuration {
       DEFAULT_TIMELINE_SERVICE_READER_WEBAPP_HTTPS_ADDRESS =
       DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS;
 
+  @Private
+  public static final String
+      TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS =
+      TIMELINE_SERVICE_READER_PREFIX + "storage-monitor.interval-ms";
+  public static final long
+      DEFAULT_TIMELINE_SERVICE_STORAGE_MONITOR_INTERVAL_MS = 60 * 1000;
+
   /**
    * Marked collector properties as Private since it run as auxillary service.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba683204/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java
new file mode 100644
index 0000000..786f529
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderServer;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS;
+import static org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl.DATA_TO_RETRIEVE;
+import static org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl.MONITOR_FILTERS;
+
+public class TestTimelineReaderHBaseDown {
+
+  @Test(timeout=300000)
+  public void testTimelineReaderHBaseUp() throws Exception {
+    HBaseTestingUtility util = new HBaseTestingUtility();
+    configure(util);
+    try {
+      util.startMiniCluster();
+      DataGeneratorForTest.createSchema(util.getConfiguration());
+      DataGeneratorForTest.loadApps(util, System.currentTimeMillis());
+
+      TimelineReaderServer server = getTimelineReaderServer();
+      server.init(util.getConfiguration());
+      HBaseTimelineReaderImpl htr = getHBaseTimelineReaderImpl(server);
+      server.start();
+      checkQuery(htr);
+    } finally {
+      util.shutdownMiniCluster();
+    }
+  }
+
+  @Test(timeout=300000)
+  public void testTimelineReaderInitWhenHBaseIsDown() throws
+      TimeoutException, InterruptedException {
+    HBaseTestingUtility util = new HBaseTestingUtility();
+    configure(util);
+    TimelineReaderServer server = getTimelineReaderServer();
+
+    // init timeline reader when hbase is not running
+    server.init(util.getConfiguration());
+    HBaseTimelineReaderImpl htr = getHBaseTimelineReaderImpl(server);
+    server.start();
+    waitForHBaseDown(htr);
+  }
+
+  @Test(timeout=300000)
+  public void testTimelineReaderDetectsHBaseDown() throws Exception {
+    HBaseTestingUtility util = new HBaseTestingUtility();
+    configure(util);
+
+    try {
+      // start minicluster
+      util.startMiniCluster();
+      DataGeneratorForTest.createSchema(util.getConfiguration());
+      DataGeneratorForTest.loadApps(util, System.currentTimeMillis());
+
+      // init timeline reader
+      TimelineReaderServer server = getTimelineReaderServer();
+      server.init(util.getConfiguration());
+      HBaseTimelineReaderImpl htr = getHBaseTimelineReaderImpl(server);
+
+      // stop hbase after timeline reader init
+      util.shutdownMiniHBaseCluster();
+
+      // start server and check that it detects hbase is down
+      server.start();
+      waitForHBaseDown(htr);
+    } finally {
+      util.shutdownMiniCluster();
+    }
+  }
+
+  @Test(timeout=300000)
+  public void testTimelineReaderDetectsZooKeeperDown() throws Exception {
+    HBaseTestingUtility util = new HBaseTestingUtility();
+    configure(util);
+
+    try {
+      // start minicluster
+      util.startMiniCluster();
+      DataGeneratorForTest.createSchema(util.getConfiguration());
+      DataGeneratorForTest.loadApps(util, System.currentTimeMillis());
+
+      // init timeline reader
+      TimelineReaderServer server = getTimelineReaderServer();
+      server.init(util.getConfiguration());
+      HBaseTimelineReaderImpl htr = getHBaseTimelineReaderImpl(server);
+
+      // stop hbase and zookeeper after timeline reader init
+      util.shutdownMiniCluster();
+
+      // start server and check that it detects hbase is down
+      server.start();
+      waitForHBaseDown(htr);
+    } finally {
+      util.shutdownMiniCluster();
+    }
+  }
+
+  @Test(timeout=300000)
+  public void testTimelineReaderRecoversAfterHBaseReturns() throws Exception {
+    HBaseTestingUtility util = new HBaseTestingUtility();
+    configure(util);
+
+    try {
+      // start minicluster
+      util.startMiniCluster();
+      DataGeneratorForTest.createSchema(util.getConfiguration());
+      DataGeneratorForTest.loadApps(util, System.currentTimeMillis());
+
+      // init timeline reader
+      TimelineReaderServer server = getTimelineReaderServer();
+      server.init(util.getConfiguration());
+      HBaseTimelineReaderImpl htr = getHBaseTimelineReaderImpl(server);
+
+      // stop hbase after timeline reader init
+      util.shutdownMiniHBaseCluster();
+
+      // start server and check that it detects hbase is down
+      server.start();
+      waitForHBaseDown(htr);
+
+      util.startMiniHBaseCluster(1, 1);
+      GenericTestUtils.waitFor(() -> !htr.isHBaseDown(), 1000, 150000);
+    } finally {
+      util.shutdownMiniCluster();
+    }
+  }
+
+  private static void waitForHBaseDown(HBaseTimelineReaderImpl htr) throws
+      TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(() -> htr.isHBaseDown(), 1000, 150000);
+    try {
+      checkQuery(htr);
+      Assert.fail("Query should fail when HBase is down");
+    } catch (IOException e) {
+      Assert.assertEquals("HBase is down", e.getMessage());
+    }
+  }
+
+  private static void checkQuery(HBaseTimelineReaderImpl htr) throws
+      IOException {
+    TimelineReaderContext context =
+        new TimelineReaderContext(YarnConfiguration.DEFAULT_RM_CLUSTER_ID,
+            null, null, null, null, TimelineEntityType
+            .YARN_FLOW_ACTIVITY.toString(), null, null);
+    Set<TimelineEntity> entities = htr.getEntities(context, MONITOR_FILTERS,
+        DATA_TO_RETRIEVE);
+  }
+
+  private static void configure(HBaseTestingUtility util) {
+    Configuration config = util.getConfiguration();
+    config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+    config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+    config.set(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
+        "localhost:0");
+    config.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
+    config.set(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,
+        "org.apache.hadoop.yarn.server.timelineservice.storage."
+            + "HBaseTimelineReaderImpl");
+    config.setInt("hfile.format.version", 3);
+    config.setLong(TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS, 5000);
+  }
+
+  private static TimelineReaderServer getTimelineReaderServer() {
+    return new TimelineReaderServer() {
+      @Override
+      protected void addFilters(Configuration conf) {
+        // The parent code uses hadoop-common jar from this version of
+        // Hadoop, but the tests are using hadoop-common jar from
+        // ${hbase-compatible-hadoop.version}.  This version uses Jetty 9
+        // while ${hbase-compatible-hadoop.version} uses Jetty 6, and there
+        // are many differences, including classnames and packages.
+        // We do nothing here, so that we don't cause a NoSuchMethodError or
+        // NoClassDefFoundError.
+        // Once ${hbase-compatible-hadoop.version} is changed to Hadoop 3,
+        // we should be able to remove this @Override.
+      }
+    };
+  }
+
+  private static HBaseTimelineReaderImpl getHBaseTimelineReaderImpl(
+      TimelineReaderServer server) {
+    for (Service s: server.getServices()) {
+      if (s instanceof HBaseTimelineReaderImpl) {
+        return (HBaseTimelineReaderImpl) s;
+      }
+    }
+    throw new IllegalStateException("Couldn't find HBaseTimelineReaderImpl");
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba683204/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
index 1ebfab2..fadfd14 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
@@ -20,12 +20,18 @@ package org.apache.hadoop.yarn.server.timelineservice.storage;
 
 import java.io.IOException;
 import java.util.Set;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
@@ -47,6 +53,12 @@ public class HBaseTimelineReaderImpl
 
   private Configuration hbaseConf = null;
   private Connection conn;
+  private Configuration monitorHBaseConf = null;
+  private Connection monitorConn;
+  private ScheduledExecutorService monitorExecutorService;
+  private TimelineReaderContext monitorContext;
+  private long monitorInterval;
+  private AtomicBoolean hbaseDown = new AtomicBoolean();
 
   public HBaseTimelineReaderImpl() {
     super(HBaseTimelineReaderImpl.class.getName());
@@ -55,22 +67,72 @@ public class HBaseTimelineReaderImpl
   @Override
   public void serviceInit(Configuration conf) throws Exception {
     super.serviceInit(conf);
+
+    String clusterId = conf.get(
+        YarnConfiguration.RM_CLUSTER_ID,
+        YarnConfiguration.DEFAULT_RM_CLUSTER_ID);
+    monitorContext =
+        new TimelineReaderContext(clusterId, null, null, null, null,
+            TimelineEntityType.YARN_FLOW_ACTIVITY.toString(), null, null);
+    monitorInterval = conf.getLong(
+        YarnConfiguration.TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS,
+        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_STORAGE_MONITOR_INTERVAL_MS);
+
+    monitorHBaseConf = HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf);
+    monitorHBaseConf.setInt("hbase.client.retries.number", 3);
+    monitorHBaseConf.setLong("hbase.client.pause", 1000);
+    monitorHBaseConf.setLong("hbase.rpc.timeout", monitorInterval);
+    monitorHBaseConf.setLong("hbase.client.scanner.timeout.period",
+        monitorInterval);
+    monitorHBaseConf.setInt("zookeeper.recovery.retry", 1);
+    monitorConn = ConnectionFactory.createConnection(monitorHBaseConf);
+
+    monitorExecutorService = Executors.newScheduledThreadPool(1);
+
     hbaseConf = HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf);
     conn = ConnectionFactory.createConnection(hbaseConf);
   }
 
   @Override
+  protected void serviceStart() throws Exception {
+    super.serviceStart();
+    LOG.info("Scheduling HBase liveness monitor at interval {}",
+        monitorInterval);
+    monitorExecutorService.scheduleAtFixedRate(new HBaseMonitor(), 0,
+        monitorInterval, TimeUnit.MILLISECONDS);
+  }
+
+  @Override
   protected void serviceStop() throws Exception {
     if (conn != null) {
       LOG.info("closing the hbase Connection");
       conn.close();
     }
+    if (monitorExecutorService != null) {
+      monitorExecutorService.shutdownNow();
+      if (!monitorExecutorService.awaitTermination(30, TimeUnit.SECONDS)) {
+        LOG.warn("failed to stop the monitor task in time. " +
+            "will still proceed to close the monitor.");
+      }
+    }
+    monitorConn.close();
     super.serviceStop();
   }
 
+  private void checkHBaseDown() throws IOException {
+    if (hbaseDown.get()) {
+      throw new IOException("HBase is down");
+    }
+  }
+
+  public boolean isHBaseDown() {
+    return hbaseDown.get();
+  }
+
   @Override
   public TimelineEntity getEntity(TimelineReaderContext context,
       TimelineDataToRetrieve dataToRetrieve) throws IOException {
+    checkHBaseDown();
     TimelineEntityReader reader =
         TimelineEntityReaderFactory.createSingleEntityReader(context,
             dataToRetrieve);
@@ -81,6 +143,7 @@ public class HBaseTimelineReaderImpl
   public Set<TimelineEntity> getEntities(TimelineReaderContext context,
       TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve)
       throws IOException {
+    checkHBaseDown();
     TimelineEntityReader reader =
         TimelineEntityReaderFactory.createMultipleEntitiesReader(context,
             filters, dataToRetrieve);
@@ -90,7 +153,37 @@ public class HBaseTimelineReaderImpl
   @Override
   public Set<String> getEntityTypes(TimelineReaderContext context)
       throws IOException {
+    checkHBaseDown();
     EntityTypeReader reader = new EntityTypeReader(context);
     return reader.readEntityTypes(hbaseConf, conn);
   }
+
+  protected static final TimelineEntityFilters MONITOR_FILTERS =
+      new TimelineEntityFilters.Builder().entityLimit(1L).build();
+  protected static final TimelineDataToRetrieve DATA_TO_RETRIEVE =
+      new TimelineDataToRetrieve(null, null, null, null, null, null);
+
+  private class HBaseMonitor implements Runnable {
+    @Override
+    public void run() {
+      try {
+        LOG.info("Running HBase liveness monitor");
+        TimelineEntityReader reader =
+            TimelineEntityReaderFactory.createMultipleEntitiesReader(
+                monitorContext, MONITOR_FILTERS, DATA_TO_RETRIEVE);
+        reader.readEntities(monitorHBaseConf, monitorConn);
+
+        // on success, reset hbase down flag
+        if (hbaseDown.getAndSet(false)) {
+          if(LOG.isDebugEnabled()) {
+            LOG.debug("HBase request succeeded, assuming HBase up");
+          }
+        }
+      } catch (Exception e) {
+        LOG.warn("Got failure attempting to read from timeline storage, " +
+            "assuming HBase down", e);
+        hbaseDown.getAndSet(true);
+      }
+    }
+  }
 }

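The reader-side change above boils down to a small liveness-monitor pattern: a scheduled probe flips an AtomicBoolean, and every read path checks that flag before touching storage. Below is a minimal sketch of the pattern, with a hypothetical probe() standing in for the flow-activity read the patch performs against HBase.

import java.io.IOException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

/** Minimal sketch of a storage liveness monitor (probe() is a hypothetical stand-in). */
public class StorageLivenessMonitor {

  /** Hypothetical cheap read against the backing store; throws if the store is down. */
  public interface Probe {
    void probe() throws Exception;
  }

  private final AtomicBoolean storageDown = new AtomicBoolean(false);
  private final ScheduledExecutorService executor =
      Executors.newSingleThreadScheduledExecutor();
  private final Probe probe;

  public StorageLivenessMonitor(Probe probe) {
    this.probe = probe;
  }

  /** Run the probe at a fixed interval and record the result in the flag. */
  public void start(long intervalMillis) {
    executor.scheduleAtFixedRate(() -> {
      try {
        probe.probe();
        storageDown.set(false);   // probe succeeded, assume storage is back up
      } catch (Exception e) {
        storageDown.set(true);    // probe failed, fail reads fast until it recovers
      }
    }, 0, intervalMillis, TimeUnit.MILLISECONDS);
  }

  /** Called at the top of every read path, mirroring checkHBaseDown() above. */
  public void checkAvailable() throws IOException {
    if (storageDown.get()) {
      throw new IOException("storage is down");
    }
  }

  public boolean isDown() {
    return storageDown.get();
  }

  public void stop() {
    executor.shutdownNow();
  }
}

The patch also gives the probe its own connection with tighter retry, pause, and timeout settings than the regular read path, so a dead cluster is detected within roughly one monitor interval instead of hanging queries behind long client retries.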



[05/50] [abbrv] hadoop git commit: Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk

Posted by bh...@apache.org.
Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c163d179
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c163d179
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c163d179

Branch: refs/heads/HDDS-48
Commit: c163d1797ade0f47d35b4a44381b8ef1dfec5b60
Parents: 0d9804d 99febe7
Author: Giovanni Matteo Fumarola <gi...@apache.com>
Authored: Thu Jul 5 10:55:05 2018 -0700
Committer: Giovanni Matteo Fumarola <gi...@apache.com>
Committed: Thu Jul 5 10:55:05 2018 -0700

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            |  17 +-
 .../hadoop/yarn/api/records/Resource.java       |  13 +
 .../api/records/impl/LightWeightResource.java   |  23 +-
 .../scheduler/fair/ConfigurableResource.java    |  69 ++++-
 .../fair/FairSchedulerConfiguration.java        | 174 ++++++++++--
 .../allocation/AllocationFileQueueParser.java   |   2 +-
 .../resourcemanager/webapp/dao/AppInfo.java     |   2 +-
 .../webapp/dao/SchedulerInfo.java               |   8 +-
 .../fair/TestFairSchedulerConfiguration.java    | 160 ++++++++---
 .../webapp/TestRMWebServices.java               |  31 ++-
 .../webapp/TestRMWebServicesApps.java           |  14 +-
 ...estRMWebServicesAppsCustomResourceTypes.java | 242 +++++++++++++++++
 .../webapp/TestRMWebServicesCapacitySched.java  |  30 +-
 .../TestRMWebServicesConfigurationMutation.java |   5 +
 .../webapp/TestRMWebServicesFairScheduler.java  |  95 +++----
 .../TestRMWebServicesSchedulerActivities.java   |   2 +-
 ...ustomResourceTypesConfigurationProvider.java | 138 ++++++++++
 .../FairSchedulerJsonVerifications.java         | 139 ++++++++++
 .../FairSchedulerXmlVerifications.java          | 153 +++++++++++
 ...ervicesFairSchedulerCustomResourceTypes.java | 271 +++++++++++++++++++
 .../webapp/helper/AppInfoJsonVerifications.java | 123 +++++++++
 .../webapp/helper/AppInfoXmlVerifications.java  | 132 +++++++++
 .../webapp/helper/BufferedClientResponse.java   |  57 ++++
 .../helper/JsonCustomResourceTypeTestcase.java  |  77 ++++++
 .../ResourceRequestsJsonVerifications.java      | 252 +++++++++++++++++
 .../ResourceRequestsXmlVerifications.java       | 215 +++++++++++++++
 .../helper/XmlCustomResourceTypeTestCase.java   | 112 ++++++++
 .../src/site/markdown/FairScheduler.md          |   6 +-
 28 files changed, 2405 insertions(+), 157 deletions(-)
----------------------------------------------------------------------





[35/50] [abbrv] hadoop git commit: Only mount non-empty directories for cgroups (miklos.szegedi@cloudera.com via rkanter)

Posted by bh...@apache.org.
Only mount non-empty directories for cgroups (miklos.szegedi@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0838fe83
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0838fe83
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0838fe83

Branch: refs/heads/HDDS-48
Commit: 0838fe833738e04f5e6f6408e97866d77bebbf30
Parents: eecb5ba
Author: Robert Kanter <rk...@apache.org>
Authored: Mon Jul 9 10:37:20 2018 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Mon Jul 9 10:37:20 2018 -0700

----------------------------------------------------------------------
 .../impl/container-executor.c                   | 30 +++++++++++++++++++-
 .../test/test-container-executor.c              | 20 +++++++++++++
 2 files changed, 49 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0838fe83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index baf0e8b..effeeee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -2379,6 +2379,28 @@ void chown_dir_contents(const char *dir_path, uid_t uid, gid_t gid) {
   free(path_tmp);
 }
 
+int is_empty(char *target_dir) {
+  DIR *dir = NULL;
+  struct dirent *entry = NULL;
+  dir = opendir(target_dir);
+  if (!dir) {
+    fprintf(LOGFILE, "Could not open directory %s - %s\n", target_dir,
+            strerror(errno));
+    return 0;
+  }
+  while ((entry = readdir(dir)) != NULL) {
+    if (strcmp(entry->d_name, ".") == 0) {
+      continue;
+    }
+    if (strcmp(entry->d_name, "..") == 0) {
+      continue;
+    }
+    fprintf(LOGFILE, "Directory is not empty %s\n", target_dir);
+    return 0;
+  }
+  return 1;
+}
+
 /**
  * Mount a cgroup controller at the requested mount point and create
  * a hierarchy for the Hadoop NodeManager to manage.
@@ -2413,7 +2435,13 @@ int mount_cgroup(const char *pair, const char *hierarchy) {
     result = -1;
   } else {
     if (strstr(mount_path, "..") != NULL) {
-      fprintf(LOGFILE, "Unsupported cgroup mount path detected.\n");
+      fprintf(LOGFILE, "Unsupported cgroup mount path detected. %s\n",
+          mount_path);
+      result = INVALID_COMMAND_PROVIDED;
+      goto cleanup;
+    }
+    if (!is_empty(mount_path)) {
+      fprintf(LOGFILE, "cgroup mount path is not empty. %s\n", mount_path);
       result = INVALID_COMMAND_PROVIDED;
       goto cleanup;
     }
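
The guard above only proceeds when the cgroup mount point is an empty directory. For comparison, a minimal Java sketch of the same check, assuming nothing beyond the JDK; the mount path and exception handling are illustrative, not part of the NodeManager code:

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class CgroupMountCheck {
      // True only when the path is a directory with no entries, mirroring the
      // is_empty() function added to container-executor.c above.
      static boolean isEmptyDirectory(Path dir) throws IOException {
        if (!Files.isDirectory(dir)) {
          return false;
        }
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
          return !stream.iterator().hasNext();
        }
      }

      public static void main(String[] args) throws IOException {
        Path mountPath = Paths.get("/sys/fs/cgroup/cpu/hadoop-yarn"); // illustrative path
        if (!isEmptyDirectory(mountPath)) {
          throw new IOException("cgroup mount path is not empty: " + mountPath);
        }
        // safe to mount the controller here
      }
    }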

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0838fe83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 3d32883..a199d84 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1203,6 +1203,23 @@ void test_trim_function() {
   free(trimmed);
 }
 
+void test_is_empty() {
+  printf("\nTesting is_empty function\n");
+  if (is_empty("/")) {
+    printf("FAIL: / should not be empty\n");
+    exit(1);
+  }
+  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
+    printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+    exit(1);
+  }
+  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
+  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
+    printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+    exit(1);
+  }
+}
+
 // This test is expected to be executed either by a regular
 // user or by root. If executed by a regular user it doesn't
 // test all the functions that would depend on changing the
@@ -1264,6 +1281,9 @@ int main(int argc, char **argv) {
 
   printf("\nStarting tests\n");
 
+  printf("\ntest_is_empty()\n");
+  test_is_empty();
+
   printf("\nTesting recursive_unlink_children()\n");
   test_recursive_unlink_children();
 




[38/50] [abbrv] hadoop git commit: HADOOP-15581. Set default jetty log level to INFO in KMS. Contributed by Kitti Nanasi.

Posted by bh...@apache.org.
HADOOP-15581. Set default jetty log level to INFO in KMS. Contributed by Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/895845e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/895845e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/895845e9

Branch: refs/heads/HDDS-48
Commit: 895845e9b0d7ac49da36b5cf773c6330afe4f3e0
Parents: def9d94
Author: Xiao Chen <xi...@apache.org>
Authored: Mon Jul 9 12:06:25 2018 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Mon Jul 9 12:06:50 2018 -0700

----------------------------------------------------------------------
 .../hadoop-kms/src/main/conf/kms-log4j.properties                | 4 +++-
 .../hadoop-kms/src/test/resources/log4j.properties               | 4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/895845e9/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties b/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties
index 04a3cf3..e2afd41 100644
--- a/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties
+++ b/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties
@@ -37,4 +37,6 @@ log4j.logger.org.apache.hadoop=INFO
 log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
 # make zookeeper log level an explicit config, and not changing with rootLogger.
 log4j.logger.org.apache.zookeeper=INFO
-log4j.logger.org.apache.curator=INFO
\ No newline at end of file
+log4j.logger.org.apache.curator=INFO
+# make jetty log level an explicit config, and not changing with rootLogger.
+log4j.logger.org.eclipse.jetty=INFO
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/895845e9/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties b/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
index e319af6..b8e6353 100644
--- a/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
+++ b/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
@@ -31,4 +31,6 @@ log4j.logger.org.apache.directory.server.core=OFF
 log4j.logger.org.apache.hadoop.util.NativeCodeLoader=OFF
 # make zookeeper log level an explicit config, and not changing with rootLogger.
 log4j.logger.org.apache.zookeeper=INFO
-log4j.logger.org.apache.curator=INFO
\ No newline at end of file
+log4j.logger.org.apache.curator=INFO
+# make jetty log level an explicit config, and not changing with rootLogger.
+log4j.logger.org.eclipse.jetty=INFO
\ No newline at end of file
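
Pinning log4j.logger.org.eclipse.jetty=INFO works through log4j's logger-name hierarchy: any org.eclipse.jetty.* logger without an explicit level walks up to the nearest ancestor that has one, so it stops following rootLogger once the parent is pinned. A small sketch of that resolution, assuming the log4j 1.2 API these properties files target; the logger names are only examples:

    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;

    public class JettyLogLevelDemo {
      public static void main(String[] args) {
        // Simulate turning the root logger up to DEBUG while troubleshooting KMS.
        Logger.getRootLogger().setLevel(Level.DEBUG);

        Logger jetty = Logger.getLogger("org.eclipse.jetty.server.Server");
        // No explicit level anywhere in the org.eclipse.jetty branch yet,
        // so the effective level is inherited from root: DEBUG.
        System.out.println("inherited: " + jetty.getEffectiveLevel());

        // Equivalent of the kms-log4j.properties line added above.
        Logger.getLogger("org.eclipse.jetty").setLevel(Level.INFO);
        System.out.println("pinned:    " + jetty.getEffectiveLevel()); // INFO
      }
    }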




[42/50] [abbrv] hadoop git commit: Fix compilation issues after merging trunk

Posted by bh...@apache.org.
Fix compilation issues after merging trunk


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/860c5887
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/860c5887
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/860c5887

Branch: refs/heads/HDDS-48
Commit: 860c58875f97a44cf813decbea939579e50fe39b
Parents: c275a9a
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Mon Jul 9 12:30:59 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Mon Jul 9 12:30:59 2018 -0700

----------------------------------------------------------------------
 .../commandhandler/TestBlockDeletion.java       | 32 +++++++++++---------
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java |  4 +--
 2 files changed, 19 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/860c5887/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index 62059ec..c60c6c4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -34,9 +34,10 @@ import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -46,6 +47,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.utils.MetadataStore;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.io.File;
@@ -56,10 +58,11 @@ import java.util.function.Consumer;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
 
+@Ignore("Need to be fixed according to ContainerIO")
 public class TestBlockDeletion {
   private static OzoneConfiguration conf = null;
   private static ObjectStore store;
-  private static ContainerManagerImpl dnContainerManager = null;
+  private static ContainerSet dnContainerManager = null;
   private static StorageContainerManager scm = null;
   private static OzoneManager om = null;
   private static Set<Long> containerIdsWithDeletedBlocks;
@@ -85,9 +88,8 @@ public class TestBlockDeletion {
         MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
     cluster.waitForClusterToBeReady();
     store = OzoneClientFactory.getRpcClient(conf).getObjectStore();
-    dnContainerManager =
-        (ContainerManagerImpl) cluster.getHddsDatanodes().get(0)
-            .getDatanodeStateMachine().getContainer().getContainerManager();
+    dnContainerManager = cluster.getHddsDatanodes().get(0)
+        .getDatanodeStateMachine().getContainer().getContainerSet();
     om = cluster.getOzoneManager();
     scm = cluster.getStorageContainerManager();
     containerIdsWithDeletedBlocks = new HashSet<>();
@@ -148,8 +150,8 @@ public class TestBlockDeletion {
         Assert.assertEquals(
             scm.getContainerInfo(containerId).getDeleteTransactionId(), 0);
       }
-      Assert.assertEquals(dnContainerManager.readContainer(containerId)
-              .getDeleteTransactionId(),
+      Assert.assertEquals(dnContainerManager.getContainer(containerId)
+              .getContainerData().getDeleteTransactionId(),
           scm.getContainerInfo(containerId).getDeleteTransactionId());
     }
   }
@@ -159,9 +161,9 @@ public class TestBlockDeletion {
       throws IOException {
     return performOperationOnKeyContainers((blockID) -> {
       try {
-        MetadataStore db = KeyUtils.getDB(
-            dnContainerManager.getContainerMap().get(blockID.getContainerID()),
-            conf);
+        MetadataStore db = KeyUtils.getDB((KeyValueContainerData)
+                dnContainerManager.getContainer(blockID.getContainerID())
+                    .getContainerData(), conf);
         Assert.assertNotNull(db.get(Longs.toByteArray(blockID.getLocalID())));
       } catch (IOException e) {
         e.printStackTrace();
@@ -174,9 +176,9 @@ public class TestBlockDeletion {
       throws IOException {
     return performOperationOnKeyContainers((blockID) -> {
       try {
-        MetadataStore db = KeyUtils.getDB(
-            dnContainerManager.getContainerMap().get(blockID.getContainerID()),
-            conf);
+        MetadataStore db = KeyUtils.getDB((KeyValueContainerData)
+            dnContainerManager.getContainer(blockID.getContainerID())
+                .getContainerData(), conf);
         Assert.assertNull(db.get(Longs.toByteArray(blockID.getLocalID())));
         Assert.assertNull(db.get(DFSUtil.string2Bytes(
             OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID())));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/860c5887/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
index cc11feb..722c1a5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
@@ -338,8 +338,8 @@ public class TestSCMCli {
 
     openStatus = data.isOpen() ? "OPEN" : "CLOSED";
     expected = String
-        .format(formatStr, container.getContainerID(), openStatus,
-            data.getDbFile().getPath(), data.getContainerPath(), "",
+        .format(formatStr, container.getContainerInfo().getContainerID(),
+            openStatus, data.getDbFile().getPath(), data.getContainerPath(), "",
             datanodeDetails.getHostName(), datanodeDetails.getHostName());
     assertEquals(expected, out.toString());
   }




[43/50] [abbrv] hadoop git commit: HADOOP-15568. fix some typos in the .sh comments. Contributed by Steve Loughran.

Posted by bh...@apache.org.
HADOOP-15568. fix some typos in the .sh comments. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a08ddfa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a08ddfa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a08ddfa

Branch: refs/heads/HDDS-48
Commit: 4a08ddfa68a405bfd97ffd96fafc1e3d48d20d7e
Parents: ea9b608
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon Jul 9 15:43:38 2018 -0400
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon Jul 9 15:43:38 2018 -0400

----------------------------------------------------------------------
 .../hadoop-common/src/main/conf/hadoop-env.sh                  | 6 +++---
 .../hadoop-common/src/main/conf/hadoop-metrics2.properties     | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a08ddfa/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index 3826f67..6db085a 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -88,7 +88,7 @@
 # Extra Java runtime options for all Hadoop commands. We don't support
 # IPv6 yet/still, so by default the preference is set to IPv4.
 # export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
-# For Kerberos debugging, an extended option set logs more invormation
+# For Kerberos debugging, an extended option set logs more information
 # export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
 
 # Some parts of the shell code may do special things dependent upon
@@ -120,9 +120,9 @@ esac
 #
 # By default, Apache Hadoop overrides Java's CLASSPATH
 # environment variable.  It is configured such
-# that it sarts out blank with new entries added after passing
+# that it starts out blank with new entries added after passing
 # a series of checks (file/dir exists, not already listed aka
-# de-deduplication).  During de-depulication, wildcards and/or
+# de-deduplication).  During de-deduplication, wildcards and/or
 # directories are *NOT* expanded to keep it simple. Therefore,
 # if the computed classpath has two specific mentions of
 # awesome-methods-1.0.jar, only the first one added will be seen.
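
The comment describes order-preserving de-duplication: the first occurrence of an entry wins and later duplicates are dropped, without expanding wildcards or directories. A LinkedHashSet gives the same "first one added will be seen" behaviour; this Java sketch only illustrates the rule, it is not the shell code that implements it:

    import java.util.Arrays;
    import java.util.LinkedHashSet;

    public class ClasspathDedup {
      public static void main(String[] args) {
        String classpath = "/opt/lib/awesome-methods-1.0.jar:/opt/conf:/opt/lib/awesome-methods-1.0.jar";
        // Insertion order is kept; duplicates after the first are ignored.
        LinkedHashSet<String> entries = new LinkedHashSet<>(Arrays.asList(classpath.split(":")));
        System.out.println(String.join(":", entries));
        // -> /opt/lib/awesome-methods-1.0.jar:/opt/conf
      }
    }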

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a08ddfa/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
index 16fdcf0..f061313 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
@@ -47,7 +47,7 @@
 #*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
 
 # Tag values to use for the ganglia prefix. If not defined no tags are used.
-# If '*' all tags are used. If specifiying multiple tags separate them with 
+# If '*' all tags are used. If specifying multiple tags separate them with
 # commas. Note that the last segment of the property name is the context name.
 #
 # A typical use of tags is separating the metrics by the HDFS rpc port




[48/50] [abbrv] hadoop git commit: HDDS-240. Implement metrics for EventQueue. Contributed by Elek, Marton.

Posted by bh...@apache.org.
HDDS-240. Implement metrics for EventQueue.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2403231c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2403231c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2403231c

Branch: refs/heads/HDDS-48
Commit: 2403231c8c3685ba08cd6bdf715d281cae611e45
Parents: 3c0a66a
Author: Anu Engineer <ae...@apache.org>
Authored: Mon Jul 9 13:04:44 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Mon Jul 9 13:04:44 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdds/server/events/EventQueue.java   | 108 +++++++++++--------
 .../server/events/SingleThreadExecutor.java     |  35 ++++--
 .../hdds/server/events/TestEventQueue.java      |  35 +-----
 3 files changed, 91 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2403231c/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
index 44d85f5..7e29223 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -18,7 +18,11 @@
 package org.apache.hadoop.hdds.server.events;
 
 import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+
+import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -42,6 +46,8 @@ public class EventQueue implements EventPublisher, AutoCloseable {
   private static final Logger LOG =
       LoggerFactory.getLogger(EventQueue.class);
 
+  private static final String EXECUTOR_NAME_SEPARATOR = "For";
+
   private final Map<Event, Map<EventExecutor, List<EventHandler>>> executors =
       new HashMap<>();
 
@@ -51,38 +57,74 @@ public class EventQueue implements EventPublisher, AutoCloseable {
 
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
       EVENT_TYPE event, EventHandler<PAYLOAD> handler) {
-
-    this.addHandler(event, new SingleThreadExecutor<>(
-        event.getName()), handler);
+    this.addHandler(event, handler, generateHandlerName(handler));
   }
 
+  /**
+   * Add new handler to the event queue.
+   * <p>
+   * By default a separated single thread executor will be dedicated to
+   * deliver the events to the registered event handler.
+   *
+   * @param event        Triggering event.
+   * @param handler      Handler of event (will be called from a separated
+   *                     thread)
+   * @param handlerName  The name of handler (should be unique together with
+   *                     the event name)
+   * @param <PAYLOAD>    The type of the event payload.
+   * @param <EVENT_TYPE> The type of the event identifier.
+   */
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
-      EVENT_TYPE event,
-      EventExecutor<PAYLOAD> executor,
-      EventHandler<PAYLOAD> handler) {
+      EVENT_TYPE event, EventHandler<PAYLOAD> handler, String handlerName) {
+    validateEvent(event);
+    Preconditions.checkNotNull(handler, "Handler name should not be null.");
+    String executorName =
+        StringUtils.camelize(event.getName()) + EXECUTOR_NAME_SEPARATOR
+            + handlerName;
+    this.addHandler(event, new SingleThreadExecutor<>(executorName), handler);
+  }
 
-    executors.putIfAbsent(event, new HashMap<>());
-    executors.get(event).putIfAbsent(executor, new ArrayList<>());
+  private <EVENT_TYPE extends Event<?>> void validateEvent(EVENT_TYPE event) {
+    Preconditions
+        .checkArgument(!event.getName().contains(EXECUTOR_NAME_SEPARATOR),
+            "Event name should not contain " + EXECUTOR_NAME_SEPARATOR
+                + " string.");
 
-    executors.get(event)
-        .get(executor)
-        .add(handler);
+  }
+
+  private <PAYLOAD> String generateHandlerName(EventHandler<PAYLOAD> handler) {
+    if (!"".equals(handler.getClass().getSimpleName())) {
+      return handler.getClass().getSimpleName();
+    } else {
+      return handler.getClass().getName();
+    }
   }
 
   /**
-   * Creates one executor with multiple event handlers.
+   * Add event handler with custom executor.
+   *
+   * @param event        Triggering event.
+   * @param executor     The executor implementation to deliver events from a
+   *                     separate thread. Please keep in mind that
+   *                     registering metrics is the responsibility of the
+   *                     caller.
+   * @param handler      Handler of event (will be called from a separated
+   *                     thread)
+   * @param <PAYLOAD>    The type of the event payload.
+   * @param <EVENT_TYPE> The type of the event identifier.
    */
-  public void addHandlerGroup(String name, HandlerForEvent<?>...
-      eventsAndHandlers) {
-    SingleThreadExecutor sharedExecutor =
-        new SingleThreadExecutor(name);
-    for (HandlerForEvent handlerForEvent : eventsAndHandlers) {
-      addHandler(handlerForEvent.event, sharedExecutor,
-          handlerForEvent.handler);
-    }
+  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
+      EVENT_TYPE event, EventExecutor<PAYLOAD> executor,
+      EventHandler<PAYLOAD> handler) {
+    validateEvent(event);
+    executors.putIfAbsent(event, new HashMap<>());
+    executors.get(event).putIfAbsent(executor, new ArrayList<>());
 
+    executors.get(event).get(executor).add(handler);
   }
 
+
+
   /**
    * Route an event with payload to the right listener(s).
    *
@@ -183,31 +225,5 @@ public class EventQueue implements EventPublisher, AutoCloseable {
     });
   }
 
-  /**
-   * Event identifier together with the handler.
-   *
-   * @param <PAYLOAD>
-   */
-  public static class HandlerForEvent<PAYLOAD> {
-
-    private final Event<PAYLOAD> event;
-
-    private final EventHandler<PAYLOAD> handler;
-
-    public HandlerForEvent(
-        Event<PAYLOAD> event,
-        EventHandler<PAYLOAD> handler) {
-      this.event = event;
-      this.handler = handler;
-    }
-
-    public Event<PAYLOAD> getEvent() {
-      return event;
-    }
-
-    public EventHandler<PAYLOAD> getHandler() {
-      return handler;
-    }
-  }
 
 }
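
With this change each handler gets its own single-thread executor whose name, and therefore its metrics record, is derived as camelize(eventName) + "For" + handlerName; that is also why event names containing "For" are rejected. A self-contained sketch of the naming rule, with a simplified stand-in for org.apache.hadoop.util.StringUtils.camelize() and purely illustrative event/handler names:

    public class ExecutorNaming {
      private static final String EXECUTOR_NAME_SEPARATOR = "For";

      // Mirrors the name built in EventQueue.addHandler(event, handler, handlerName).
      static String executorName(String eventName, String handlerName) {
        if (eventName.contains(EXECUTOR_NAME_SEPARATOR)) {
          throw new IllegalArgumentException(
              "Event name should not contain " + EXECUTOR_NAME_SEPARATOR + " string.");
        }
        return camelize(eventName) + EXECUTOR_NAME_SEPARATOR + handlerName;
      }

      // Simplified version of StringUtils.camelize(): SOME_NAME -> SomeName.
      static String camelize(String s) {
        StringBuilder out = new StringBuilder();
        for (String word : s.toLowerCase().split("_")) {
          if (!word.isEmpty()) {
            out.append(Character.toUpperCase(word.charAt(0))).append(word.substring(1));
          }
        }
        return out.toString();
      }

      public static void main(String[] args) {
        System.out.println(executorName("NODE_REPORT", "NodeReportHandler"));
        // -> NodeReportForNodeReportHandler
      }
    }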

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2403231c/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
index a64e3d7..3253f2d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
@@ -23,13 +23,18 @@ import org.slf4j.LoggerFactory;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 
 /**
  * Simple EventExecutor to call all the event handler one-by-one.
  *
  * @param <T>
  */
+@Metrics(context = "EventQueue")
 public class SingleThreadExecutor<T> implements EventExecutor<T> {
 
   public static final String THREAD_NAME_PREFIX = "EventQueue";
@@ -41,14 +46,24 @@ public class SingleThreadExecutor<T> implements EventExecutor<T> {
 
   private final ThreadPoolExecutor executor;
 
-  private final AtomicLong queuedCount = new AtomicLong(0);
+  @Metric
+  private MutableCounterLong queued;
 
-  private final AtomicLong successfulCount = new AtomicLong(0);
+  @Metric
+  private MutableCounterLong done;
 
-  private final AtomicLong failedCount = new AtomicLong(0);
+  @Metric
+  private MutableCounterLong failed;
 
+  /**
+   * Create SingleThreadExecutor.
+   *
+   * @param name Unique name used in monitoring and metrics.
+   */
   public SingleThreadExecutor(String name) {
     this.name = name;
+    DefaultMetricsSystem.instance()
+        .register("EventQueue" + name, "Event Executor metrics ", this);
 
     LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>();
     executor =
@@ -64,31 +79,31 @@ public class SingleThreadExecutor<T> implements EventExecutor<T> {
   @Override
   public void onMessage(EventHandler<T> handler, T message, EventPublisher
       publisher) {
-    queuedCount.incrementAndGet();
+    queued.incr();
     executor.execute(() -> {
       try {
         handler.onMessage(message, publisher);
-        successfulCount.incrementAndGet();
+        done.incr();
       } catch (Exception ex) {
         LOG.error("Error on execution message {}", message, ex);
-        failedCount.incrementAndGet();
+        failed.incr();
       }
     });
   }
 
   @Override
   public long failedEvents() {
-    return failedCount.get();
+    return failed.value();
   }
 
   @Override
   public long successfulEvents() {
-    return successfulCount.get();
+    return done.value();
   }
 
   @Override
   public long queuedEvents() {
-    return queuedCount.get();
+    return queued.value();
   }
 
   @Override
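
The executor now exposes its counters through the Hadoop metrics2 system instead of plain AtomicLongs: annotated MutableCounterLong fields are instantiated when the object is registered as a metrics source under the "EventQueue" context. A minimal sketch of that pattern, assuming only the hadoop-common metrics2 classes imported in this patch; the class, prefix, and record names are illustrative:

    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;

    @Metrics(context = "EventQueue")
    public class QueueCounters {

      // Injected by the metrics system when register() is called.
      @Metric private MutableCounterLong queued;
      @Metric private MutableCounterLong done;
      @Metric private MutableCounterLong failed;

      QueueCounters(String name) {
        // One metrics record per executor; the name must be unique.
        DefaultMetricsSystem.instance()
            .register("EventQueue" + name, "Event executor metrics", this);
      }

      void onQueued() { queued.incr(); }
      void onDone()   { done.incr(); }
      void onFailed() { failed.incr(); }

      public static void main(String[] args) {
        DefaultMetricsSystem.initialize("Example");   // illustrative prefix
        QueueCounters c = new QueueCounters("DemoExecutor");
        c.onQueued();
        c.onDone();
        System.out.println(c.done.value());           // 1
      }
    }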

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2403231c/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
index 3944409..2bdf705 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
@@ -25,6 +25,8 @@ import org.junit.Test;
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+
 /**
  * Testing the basic functionality of the event queue.
  */
@@ -44,11 +46,13 @@ public class TestEventQueue {
 
   @Before
   public void startEventQueue() {
+    DefaultMetricsSystem.initialize(getClass().getSimpleName());
     queue = new EventQueue();
   }
 
   @After
   public void stopEventQueue() {
+    DefaultMetricsSystem.shutdown();
     queue.close();
   }
 
@@ -79,35 +83,4 @@ public class TestEventQueue {
 
   }
 
-  @Test
-  public void handlerGroup() {
-    final long[] result = new long[2];
-    queue.addHandlerGroup(
-        "group",
-        new EventQueue.HandlerForEvent<>(EVENT3, (payload, publisher) ->
-            result[0] = payload),
-        new EventQueue.HandlerForEvent<>(EVENT4, (payload, publisher) ->
-            result[1] = payload)
-    );
-
-    queue.fireEvent(EVENT3, 23L);
-    queue.fireEvent(EVENT4, 42L);
-
-    queue.processAll(1000);
-
-    Assert.assertEquals(23, result[0]);
-    Assert.assertEquals(42, result[1]);
-
-    Set<String> eventQueueThreadNames =
-        Thread.getAllStackTraces().keySet()
-            .stream()
-            .filter(t -> t.getName().startsWith(SingleThreadExecutor
-                .THREAD_NAME_PREFIX))
-            .map(Thread::getName)
-            .collect(Collectors.toSet());
-    System.out.println(eventQueueThreadNames);
-    Assert.assertEquals(1, eventQueueThreadNames.size());
-
-  }
-
 }
\ No newline at end of file




[50/50] [abbrv] hadoop git commit: Merge remote-tracking branch 'origin/trunk' into HDDS-48-merge

Posted by bh...@apache.org.
Merge remote-tracking branch 'origin/trunk' into HDDS-48-merge


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1baaff8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1baaff8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1baaff8

Branch: refs/heads/HDDS-48
Commit: d1baaff8cbe16d2c7da8ca24948b31ef52fd529a
Parents: da507af 2403231
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Mon Jul 9 13:14:20 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Mon Jul 9 13:14:20 2018 -0700

----------------------------------------------------------------------

----------------------------------------------------------------------





[31/50] [abbrv] hadoop git commit: HDDS-235. Fix TestOzoneAuditLogger#verifyDefaultLogLevel. Contributed by Xiaoyu Yao.

Posted by bh...@apache.org.
HDDS-235. Fix TestOzoneAuditLogger#verifyDefaultLogLevel.
Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/790c5635
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/790c5635
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/790c5635

Branch: refs/heads/HDDS-48
Commit: 790c563511161c901b7b667e787baca8725f9249
Parents: 2f51cd6
Author: Anu Engineer <ae...@apache.org>
Authored: Sun Jul 8 11:27:54 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Sun Jul 8 11:27:54 2018 -0700

----------------------------------------------------------------------
 .../ozone/audit/TestOzoneAuditLogger.java       | 46 +++++++++++++-------
 1 file changed, 31 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/790c5635/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
index d3cc9e4..57a7d9e 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
@@ -100,7 +100,7 @@ public class TestOzoneAuditLogger {
     AUDIT.logReadFailure(DummyAction.READ_VOLUME, auditableObj.toAuditMap(), Level.ERROR);
     AUDIT.logReadFailure(DummyAction.READ_VOLUME, auditableObj.toAuditMap(), Level.ERROR,
         new Exception("test"));
-    verifyLog(null);
+    verifyNoLog();
   }
 
   /**
@@ -110,22 +110,38 @@ public class TestOzoneAuditLogger {
   public void notLogDebugEvents() throws IOException {
     AUDIT.logWriteSuccess(DummyAction.CREATE_VOLUME, auditableObj.toAuditMap(), Level.DEBUG);
     AUDIT.logReadSuccess(DummyAction.READ_VOLUME, auditableObj.toAuditMap(), Level.DEBUG);
-    verifyLog(null);
+    verifyNoLog();
   }
 
-  public void verifyLog(String expected) throws IOException {
-      File file = new File("audit.log");
-      List<String> lines = FileUtils.readLines(file, (String)null);
-      if(expected == null){
-        // When no log entry is expected, the log file must be empty
-        assertTrue(lines.size() == 0);
-      } else {
-        // When log entry is expected, the log file will contain one line and
-        // that must be equal to the expected string
-        assertTrue(expected.equalsIgnoreCase(lines.get(0)));
-        //empty the file
-        lines.remove(0);
-        FileUtils.writeLines(file, lines, false);
+  private void verifyLog(String expected) throws IOException {
+    File file = new File("audit.log");
+    List<String> lines = FileUtils.readLines(file, (String)null);
+      final int retry = 5;
+      int i = 0;
+      while (lines.isEmpty() && i < retry) {
+        lines = FileUtils.readLines(file, (String)null);
+        try {
+          Thread.sleep( 500 * (i + 1));
+        } catch(InterruptedException ie) {
+          Thread.currentThread().interrupt();
+          break;
+        }
+        i++;
       }
+
+      // When log entry is expected, the log file will contain one line and
+      // that must be equal to the expected string
+      assertTrue(lines.size() != 0);
+      assertTrue(expected.equalsIgnoreCase(lines.get(0)));
+      //empty the file
+      lines.remove(0);
+      FileUtils.writeLines(file, lines, false);
+  }
+
+  private void verifyNoLog() throws IOException {
+    File file = new File("audit.log");
+    List<String> lines = FileUtils.readLines(file, (String)null);
+    // When no log entry is expected, the log file must be empty
+    assertTrue(lines.size() == 0);
   }
 }
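
The fix replaces a single read of audit.log with a bounded poll, to tolerate the log entry not yet having been flushed when the first read happens. A self-contained sketch of the same wait-for-content loop using only the JDK; the file name and retry count mirror the test but are otherwise illustrative:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.Collections;
    import java.util.List;

    public class LogPoller {
      // Re-read the file until at least one line appears, backing off linearly
      // between attempts, the same shape as the retry loop added to verifyLog().
      static List<String> waitForLines(Path file, int retries) throws IOException {
        List<String> lines = Collections.emptyList();
        for (int i = 0; i < retries && lines.isEmpty(); i++) {
          if (Files.exists(file)) {
            lines = Files.readAllLines(file, StandardCharsets.UTF_8);
          }
          if (lines.isEmpty()) {
            try {
              Thread.sleep(500L * (i + 1));
            } catch (InterruptedException ie) {
              Thread.currentThread().interrupt();
              break;
            }
          }
        }
        return lines;
      }

      public static void main(String[] args) throws IOException {
        System.out.println(waitForLines(Paths.get("audit.log"), 5));
      }
    }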




[17/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManager.java
deleted file mode 100644
index 5ec1db8..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManager.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Handles key level commands.
- */
-public interface KeyManager {
-
-  /**
-   * Start key manager.
-   */
-  void start();
-
-  /**
-   * Stop key manager.
-   */
-  void stop() throws IOException;
-
-  /**
-   * After calling commit, the key will be made visible. There can be multiple
-   * open key writes in parallel (identified by client id). The most recently
-   * committed one will be the one visible.
-   *
-   * @param args the key to commit.
-   * @param clientID the client that is committing.
-   * @throws IOException
-   */
-  void commitKey(KsmKeyArgs args, int clientID) throws IOException;
-
-  /**
-   * A client calls this on an open key, to request to allocate a new block,
-   * and appended to the tail of current block list of the open client.
-   *
-   * @param args the key to append
-   * @param clientID the client requesting block.
-   * @return the reference to the new block.
-   * @throws IOException
-   */
-  KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID)
-      throws IOException;
-  /**
-   * Given the args of a key to put, write an open key entry to meta data.
-   *
-   * In case that the container creation or key write failed on
-   * DistributedStorageHandler, this key's metadata will still stay in KSM.
-   * TODO garbage collect the open keys that never get closed
-   *
-   * @param args the args of the key provided by client.
-   * @return a OpenKeySession instance client uses to talk to container.
-   * @throws Exception
-   */
-  OpenKeySession openKey(KsmKeyArgs args) throws IOException;
-
-  /**
-   * Look up an existing key. Return the info of the key to client side, which
-   * DistributedStorageHandler will use to access the data on datanode.
-   *
-   * @param args the args of the key provided by client.
-   * @return a KsmKeyInfo instance client uses to talk to container.
-   * @throws IOException
-   */
-  KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException;
-
-  /**
-   * Renames an existing key within a bucket.
-   *
-   * @param args the args of the key provided by client.
-   * @param toKeyName New name to be used for the key
-   * @throws IOException if specified key doesn't exist or
-   * some other I/O errors while renaming the key.
-   */
-  void renameKey(KsmKeyArgs args, String toKeyName) throws IOException;
-
-  /**
-   * Deletes an object by an object key. The key will be immediately removed
-   * from KSM namespace and become invisible to clients. The object data
-   * will be removed in async manner that might retain for some time.
-   *
-   * @param args the args of the key provided by client.
-   * @throws IOException if specified key doesn't exist or
-   * some other I/O errors while deleting an object.
-   */
-  void deleteKey(KsmKeyArgs args) throws IOException;
-
-  /**
-   * Returns a list of keys represented by {@link KsmKeyInfo}
-   * in the given bucket.
-   *
-   * @param volumeName
-   *   the name of the volume.
-   * @param bucketName
-   *   the name of the bucket.
-   * @param startKey
-   *   the start key name, only the keys whose name is
-   *   after this value will be included in the result.
-   *   This key is excluded from the result.
-   * @param keyPrefix
-   *   key name prefix, only the keys whose name has
-   *   this prefix will be included in the result.
-   * @param maxKeys
-   *   the maximum number of keys to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of keys.
-   * @throws IOException
-   */
-  List<KsmKeyInfo> listKeys(String volumeName,
-      String bucketName, String startKey, String keyPrefix, int maxKeys)
-      throws IOException;
-
-  /**
-   * Returns a list of pending deletion key info that ups to the given count.
-   * Each entry is a {@link BlockGroup}, which contains the info about the
-   * key name and all its associated block IDs. A pending deletion key is
-   * stored with #deleting# prefix in KSM DB.
-   *
-   * @param count max number of keys to return.
-   * @return a list of {@link BlockGroup} representing keys and blocks.
-   * @throws IOException
-   */
-  List<BlockGroup> getPendingDeletionKeys(int count) throws IOException;
-
-  /**
-   * Deletes a pending deletion key by its name. This is often called when
-   * key can be safely deleted from this layer. Once called, all footprints
-   * of the key will be purged from KSM DB.
-   *
-   * @param objectKeyName object key name with #deleting# prefix.
-   * @throws IOException if specified key doesn't exist or other I/O errors.
-   */
-  void deletePendingDeletionKey(String objectKeyName) throws IOException;
-
-  /**
-   * Returns a list of all still open key info. Which contains the info about
-   * the key name and all its associated block IDs. A pending open key has
-   * prefix #open# in KSM DB.
-   *
-   * @return a list of {@link BlockGroup} representing keys and blocks.
-   * @throws IOException
-   */
-  List<BlockGroup> getExpiredOpenKeys() throws IOException;
-
-  /**
-   * Deletes a expired open key by its name. Called when a hanging key has been
-   * lingering for too long. Once called, the open key entries gets removed
-   * from KSM mdata data.
-   *
-   * @param objectKeyName object key name with #open# prefix.
-   * @throws IOException if specified key doesn't exist or other I/O errors.
-   */
-  void deleteExpiredOpenKey(String objectKeyName) throws IOException;
-}
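
The interface above documents a two-phase write: openKey() creates an entry visible only to the writing client, allocateBlock() appends blocks to that open entry, and commitKey() publishes it, with the most recently committed session becoming the visible version. A toy, self-contained model of that lifecycle; the class, method bodies, and id scheme here are illustrative only and are not the KSM/OzoneManager implementation:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Random;

    public class OpenKeyLifecycle {
      private final Map<String, String> committed = new HashMap<>(); // visible keys
      private final Map<String, String> open = new HashMap<>();      // keyed by key#clientId
      private final Random random = new Random();

      int openKey(String key) {
        int clientId = random.nextInt();          // stand-in for the open-key client id
        open.put(key + "#" + clientId, "");
        return clientId;
      }

      void allocateBlock(String key, int clientId, String blockId) {
        open.computeIfPresent(key + "#" + clientId, (k, blocks) -> blocks + blockId + " ");
      }

      void commitKey(String key, int clientId) {
        // Commit makes the key visible; a later commit overwrites an earlier one.
        committed.put(key, open.remove(key + "#" + clientId));
      }

      String lookupKey(String key) {
        return committed.get(key);                // null while the key is still open
      }

      public static void main(String[] args) {
        OpenKeyLifecycle km = new OpenKeyLifecycle();
        int client = km.openKey("vol/bucket/key1");
        km.allocateBlock("vol/bucket/key1", client, "block-1");
        System.out.println(km.lookupKey("vol/bucket/key1"));  // null: not committed yet
        km.commitKey("vol/bucket/key1", client);
        System.out.println(km.lookupKey("vol/bucket/key1"));  // block-1
      }
    }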

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java
deleted file mode 100644
index 0d4cfda..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java
+++ /dev/null
@@ -1,566 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.KeyInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BackgroundService;
-import org.apache.hadoop.utils.BatchOperation;
-import org.iq80.leveldb.DBException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_KEY_PREALLOCATION_MAXSIZE;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB;
-import org.apache.hadoop.hdds.protocol
-    .proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol
-    .proto.HddsProtos.ReplicationFactor;
-
-
-/**
- * Implementation of keyManager.
- */
-public class KeyManagerImpl implements KeyManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(KeyManagerImpl.class);
-
-  /**
-   * A SCM block client, used to talk to SCM to allocate block during putKey.
-   */
-  private final ScmBlockLocationProtocol scmBlockClient;
-  private final KSMMetadataManager metadataManager;
-  private final long scmBlockSize;
-  private final boolean useRatis;
-  private final BackgroundService keyDeletingService;
-  private final BackgroundService openKeyCleanupService;
-
-  private final long preallocateMax;
-  private final Random random;
-  private final String ksmId;
-
-  public KeyManagerImpl(ScmBlockLocationProtocol scmBlockClient,
-      KSMMetadataManager metadataManager, OzoneConfiguration conf,
-      String ksmId) {
-    this.scmBlockClient = scmBlockClient;
-    this.metadataManager = metadataManager;
-    this.scmBlockSize = conf.getLong(OZONE_SCM_BLOCK_SIZE_IN_MB,
-        OZONE_SCM_BLOCK_SIZE_DEFAULT) * OzoneConsts.MB;
-    this.useRatis = conf.getBoolean(DFS_CONTAINER_RATIS_ENABLED_KEY,
-        DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
-    long  blockDeleteInterval = conf.getTimeDuration(
-        OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
-        OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,
-        TimeUnit.MILLISECONDS);
-    long serviceTimeout = conf.getTimeDuration(
-        OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
-        OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
-        TimeUnit.MILLISECONDS);
-    this.preallocateMax = conf.getLong(
-        OZONE_KEY_PREALLOCATION_MAXSIZE,
-        OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT);
-    keyDeletingService = new KeyDeletingService(
-        scmBlockClient, this, blockDeleteInterval, serviceTimeout, conf);
-    int openkeyCheckInterval = conf.getInt(
-        OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS,
-        OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT);
-    openKeyCleanupService = new OpenKeyCleanupService(
-        scmBlockClient, this, openkeyCheckInterval, serviceTimeout);
-    random = new Random();
-    this.ksmId = ksmId;
-  }
-
-  @VisibleForTesting
-  public BackgroundService getOpenKeyCleanupService() {
-    return openKeyCleanupService;
-  }
-
-  @Override
-  public void start() {
-    keyDeletingService.start();
-    openKeyCleanupService.start();
-  }
-
-  @Override
-  public void stop() throws IOException {
-    keyDeletingService.shutdown();
-    openKeyCleanupService.shutdown();
-  }
-
-  private void validateBucket(String volumeName, String bucketName)
-      throws IOException {
-    byte[] volumeKey = metadataManager.getVolumeKey(volumeName);
-    byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-
-    //Check if the volume exists
-    if(metadataManager.get(volumeKey) == null) {
-      LOG.error("volume not found: {}", volumeName);
-      throw new KSMException("Volume not found",
-          KSMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
-    }
-    //Check if bucket already exists
-    if(metadataManager.get(bucketKey) == null) {
-      LOG.error("bucket not found: {}/{} ", volumeName, bucketName);
-      throw new KSMException("Bucket not found",
-          KSMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
-    }
-  }
-
-  @Override
-  public KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID)
-      throws IOException {
-    Preconditions.checkNotNull(args);
-    metadataManager.writeLock().lock();
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-
-    try {
-      validateBucket(volumeName, bucketName);
-      String objectKey = metadataManager.getKeyWithDBPrefix(
-          volumeName, bucketName, keyName);
-      byte[] openKey = metadataManager.getOpenKeyNameBytes(objectKey, clientID);
-      byte[] keyData = metadataManager.get(openKey);
-      if (keyData == null) {
-        LOG.error("Allocate block for a key not in open status in meta store " +
-            objectKey + " with ID " + clientID);
-        throw new KSMException("Open Key not found",
-            KSMException.ResultCodes.FAILED_KEY_NOT_FOUND);
-      }
-      KsmKeyInfo keyInfo =
-          KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(keyData));
-      AllocatedBlock allocatedBlock =
-          scmBlockClient.allocateBlock(scmBlockSize, keyInfo.getType(),
-              keyInfo.getFactor(), ksmId);
-      KsmKeyLocationInfo info = new KsmKeyLocationInfo.Builder()
-          .setBlockID(allocatedBlock.getBlockID())
-          .setShouldCreateContainer(allocatedBlock.getCreateContainer())
-          .setLength(scmBlockSize)
-          .setOffset(0)
-          .build();
-      // current version not committed, so new blocks coming now are added to
-      // the same version
-      keyInfo.appendNewBlocks(Collections.singletonList(info));
-      keyInfo.updateModifcationTime();
-      metadataManager.put(openKey, keyInfo.getProtobuf().toByteArray());
-      return info;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public OpenKeySession openKey(KsmKeyArgs args) throws IOException {
-    Preconditions.checkNotNull(args);
-    metadataManager.writeLock().lock();
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-    ReplicationFactor factor = args.getFactor();
-    ReplicationType type = args.getType();
-
-    // If user does not specify a replication strategy or
-    // replication factor, KSM will use defaults.
-    if(factor == null) {
-      factor = useRatis ? ReplicationFactor.THREE: ReplicationFactor.ONE;
-    }
-
-    if(type == null) {
-      type = useRatis ? ReplicationType.RATIS : ReplicationType.STAND_ALONE;
-    }
-
-    try {
-      validateBucket(volumeName, bucketName);
-      long requestedSize = Math.min(preallocateMax, args.getDataSize());
-      List<KsmKeyLocationInfo> locations = new ArrayList<>();
-      String objectKey = metadataManager.getKeyWithDBPrefix(
-          volumeName, bucketName, keyName);
-      // requested size is not required but more like a optimization:
-      // SCM looks at the requested, if it 0, no block will be allocated at
-      // the point, if client needs more blocks, client can always call
-      // allocateBlock. But if requested size is not 0, KSM will preallocate
-      // some blocks and piggyback to client, to save RPC calls.
-      while (requestedSize > 0) {
-        long allocateSize = Math.min(scmBlockSize, requestedSize);
-        AllocatedBlock allocatedBlock =
-            scmBlockClient.allocateBlock(allocateSize, type, factor, ksmId);
-        KsmKeyLocationInfo subKeyInfo = new KsmKeyLocationInfo.Builder()
-            .setBlockID(allocatedBlock.getBlockID())
-            .setShouldCreateContainer(allocatedBlock.getCreateContainer())
-            .setLength(allocateSize)
-            .setOffset(0)
-            .build();
-        locations.add(subKeyInfo);
-        requestedSize -= allocateSize;
-      }
-      // NOTE size of a key is not a hard limit on anything, it is a value that
-      // client should expect, in terms of current size of key. If client sets a
-      // value, then this value is used, otherwise, we allocate a single block
-      // which is the current size, if read by the client.
-      long size = args.getDataSize() >= 0 ? args.getDataSize() : scmBlockSize;
-      byte[] keyKey = metadataManager.getDBKeyBytes(
-          volumeName, bucketName, keyName);
-      byte[] value = metadataManager.get(keyKey);
-      KsmKeyInfo keyInfo;
-      long openVersion;
-      if (value != null) {
-        // the key already exist, the new blocks will be added as new version
-        keyInfo = KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(value));
-        // when locations.size = 0, the new version will have identical blocks
-        // as its previous version
-        openVersion = keyInfo.addNewVersion(locations);
-        keyInfo.setDataSize(size + keyInfo.getDataSize());
-      } else {
-        // the key does not exist, create a new object, the new blocks are the
-        // version 0
-        long currentTime = Time.now();
-        keyInfo = new KsmKeyInfo.Builder()
-            .setVolumeName(args.getVolumeName())
-            .setBucketName(args.getBucketName())
-            .setKeyName(args.getKeyName())
-            .setKsmKeyLocationInfos(Collections.singletonList(
-                new KsmKeyLocationInfoGroup(0, locations)))
-            .setCreationTime(currentTime)
-            .setModificationTime(currentTime)
-            .setDataSize(size)
-            .setReplicationType(type)
-            .setReplicationFactor(factor)
-            .build();
-        openVersion = 0;
-      }
-      // Generate a random ID which is not already in meta db.
-      int id = -1;
-      // in general this should finish in a couple times at most. putting some
-      // arbitrary large number here to avoid dead loop.
-      for (int j = 0; j < 10000; j++) {
-        id = random.nextInt();
-        byte[] openKey = metadataManager.getOpenKeyNameBytes(objectKey, id);
-        if (metadataManager.get(openKey) == null) {
-          metadataManager.put(openKey, keyInfo.getProtobuf().toByteArray());
-          break;
-        }
-      }
-      if (id == -1) {
-        throw new IOException("Failed to find a usable id for " + objectKey);
-      }
-      LOG.debug("Key {} allocated in volume {} bucket {}",
-          keyName, volumeName, bucketName);
-      return new OpenKeySession(id, keyInfo, openVersion);
-    } catch (KSMException e) {
-      throw e;
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Key open failed for volume:{} bucket:{} key:{}",
-            volumeName, bucketName, keyName, ex);
-      }
-      throw new KSMException(ex.getMessage(),
-          KSMException.ResultCodes.FAILED_KEY_ALLOCATION);
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public void commitKey(KsmKeyArgs args, int clientID) throws IOException {
-    Preconditions.checkNotNull(args);
-    metadataManager.writeLock().lock();
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-    try {
-      validateBucket(volumeName, bucketName);
-      String objectKey = metadataManager.getKeyWithDBPrefix(
-          volumeName, bucketName, keyName);
-      byte[] objectKeyBytes = metadataManager.getDBKeyBytes(volumeName,
-          bucketName, keyName);
-      byte[] openKey = metadataManager.getOpenKeyNameBytes(objectKey, clientID);
-      byte[] openKeyData = metadataManager.get(openKey);
-      if (openKeyData == null) {
-        throw new KSMException("Commit a key without corresponding entry " +
-            DFSUtil.bytes2String(openKey), ResultCodes.FAILED_KEY_NOT_FOUND);
-      }
-      KsmKeyInfo keyInfo =
-          KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(openKeyData));
-      keyInfo.setDataSize(args.getDataSize());
-      keyInfo.setModificationTime(Time.now());
-      BatchOperation batch = new BatchOperation();
-      batch.delete(openKey);
-      batch.put(objectKeyBytes, keyInfo.getProtobuf().toByteArray());
-      metadataManager.writeBatch(batch);
-    } catch (KSMException e) {
-      throw e;
-    } catch (IOException ex) {
-      LOG.error("Key commit failed for volume:{} bucket:{} key:{}",
-          volumeName, bucketName, keyName, ex);
-      throw new KSMException(ex.getMessage(),
-          KSMException.ResultCodes.FAILED_KEY_ALLOCATION);
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException {
-    Preconditions.checkNotNull(args);
-    metadataManager.writeLock().lock();
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-    try {
-      byte[] keyKey = metadataManager.getDBKeyBytes(
-          volumeName, bucketName, keyName);
-      byte[] value = metadataManager.get(keyKey);
-      if (value == null) {
-        LOG.debug("volume:{} bucket:{} Key:{} not found",
-            volumeName, bucketName, keyName);
-        throw new KSMException("Key not found",
-            KSMException.ResultCodes.FAILED_KEY_NOT_FOUND);
-      }
-      return KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(value));
-    } catch (DBException ex) {
-      LOG.error("Get key failed for volume:{} bucket:{} key:{}",
-          volumeName, bucketName, keyName, ex);
-      throw new KSMException(ex.getMessage(),
-          KSMException.ResultCodes.FAILED_KEY_NOT_FOUND);
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException {
-    Preconditions.checkNotNull(args);
-    Preconditions.checkNotNull(toKeyName);
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String fromKeyName = args.getKeyName();
-    if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
-      LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}.",
-          volumeName, bucketName, fromKeyName, toKeyName);
-      throw new KSMException("Key name is empty",
-          ResultCodes.FAILED_INVALID_KEY_NAME);
-    }
-
-    metadataManager.writeLock().lock();
-    try {
-      // fromKeyName should exist
-      byte[] fromKey = metadataManager.getDBKeyBytes(
-          volumeName, bucketName, fromKeyName);
-      byte[] fromKeyValue = metadataManager.get(fromKey);
-      if (fromKeyValue == null) {
-        // TODO: Add support for renaming open key
-        LOG.error(
-            "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. "
-                + "Key: {} not found.", volumeName, bucketName, fromKeyName,
-            toKeyName, fromKeyName);
-        throw new KSMException("Key not found",
-            KSMException.ResultCodes.FAILED_KEY_NOT_FOUND);
-      }
-
-      // toKeyName should not exist
-      byte[] toKey =
-          metadataManager.getDBKeyBytes(volumeName, bucketName, toKeyName);
-      byte[] toKeyValue = metadataManager.get(toKey);
-      if (toKeyValue != null) {
-        LOG.error(
-            "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. "
-                + "Key: {} already exists.", volumeName, bucketName,
-            fromKeyName, toKeyName, toKeyName);
-        throw new KSMException("Key not found",
-            KSMException.ResultCodes.FAILED_KEY_ALREADY_EXISTS);
-      }
-
-      if (fromKeyName.equals(toKeyName)) {
-        return;
-      }
-
-      KsmKeyInfo newKeyInfo =
-          KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(fromKeyValue));
-      newKeyInfo.setKeyName(toKeyName);
-      newKeyInfo.updateModifcationTime();
-      BatchOperation batch = new BatchOperation();
-      batch.delete(fromKey);
-      batch.put(toKey, newKeyInfo.getProtobuf().toByteArray());
-      metadataManager.writeBatch(batch);
-    } catch (DBException ex) {
-      LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}.",
-          volumeName, bucketName, fromKeyName, toKeyName, ex);
-      throw new KSMException(ex.getMessage(),
-          ResultCodes.FAILED_KEY_RENAME);
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public void deleteKey(KsmKeyArgs args) throws IOException {
-    Preconditions.checkNotNull(args);
-    metadataManager.writeLock().lock();
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-    try {
-      byte[] objectKey = metadataManager.getDBKeyBytes(
-          volumeName, bucketName, keyName);
-      byte[] objectValue = metadataManager.get(objectKey);
-      if (objectValue == null) {
-        throw new KSMException("Key not found",
-            KSMException.ResultCodes.FAILED_KEY_NOT_FOUND);
-      }
-      byte[] deletingKey = metadataManager.getDeletedKeyName(objectKey);
-      BatchOperation batch = new BatchOperation();
-      batch.put(deletingKey, objectValue);
-      batch.delete(objectKey);
-      metadataManager.writeBatch(batch);
-    } catch (DBException ex) {
-      LOG.error(String.format("Delete key failed for volume:%s "
-          + "bucket:%s key:%s", volumeName, bucketName, keyName), ex);
-      throw new KSMException(ex.getMessage(), ex,
-          ResultCodes.FAILED_KEY_DELETION);
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public List<KsmKeyInfo> listKeys(String volumeName, String bucketName,
-      String startKey, String keyPrefix, int maxKeys) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
-
-    metadataManager.readLock().lock();
-    try {
-      return metadataManager.listKeys(volumeName, bucketName,
-          startKey, keyPrefix, maxKeys);
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-
-  @Override
-  public List<BlockGroup> getPendingDeletionKeys(final int count)
-      throws IOException {
-    metadataManager.readLock().lock();
-    try {
-      return metadataManager.getPendingDeletionKeys(count);
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-
-  @Override
-  public void deletePendingDeletionKey(String objectKeyName)
-      throws IOException{
-    Preconditions.checkNotNull(objectKeyName);
-    if (!objectKeyName.startsWith(OzoneConsts.DELETING_KEY_PREFIX)) {
-      throw new IllegalArgumentException("Invalid key name,"
-          + " the name should be the key name with deleting prefix");
-    }
-
-    // Simply removes the entry from KSM DB.
-    metadataManager.writeLock().lock();
-    try {
-      byte[] pendingDelKey = DFSUtil.string2Bytes(objectKeyName);
-      byte[] delKeyValue = metadataManager.get(pendingDelKey);
-      if (delKeyValue == null) {
-        throw new IOException("Failed to delete key " + objectKeyName
-            + " because it is not found in DB");
-      }
-      metadataManager.delete(pendingDelKey);
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public List<BlockGroup> getExpiredOpenKeys() throws IOException {
-    metadataManager.readLock().lock();
-    try {
-      return metadataManager.getExpiredOpenKeys();
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-
-  @Override
-  public void deleteExpiredOpenKey(String objectKeyName) throws IOException {
-    Preconditions.checkNotNull(objectKeyName);
-    if (!objectKeyName.startsWith(OzoneConsts.OPEN_KEY_PREFIX)) {
-      throw new IllegalArgumentException("Invalid key name,"
-          + " the name should be the key name with open key prefix");
-    }
-
-    // Simply removes the entry from KSM DB.
-    metadataManager.writeLock().lock();
-    try {
-      byte[] openKey = DFSUtil.string2Bytes(objectKeyName);
-      byte[] delKeyValue = metadataManager.get(openKey);
-      if (delKeyValue == null) {
-        throw new IOException("Failed to delete key " + objectKeyName
-            + " because it is not found in DB");
-      }
-      metadataManager.delete(openKey);
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-}
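
A side note on the preallocation loop in the removed openKey() above: it simply splits the requested size into SCM-block-sized chunks and asks SCM for one block per chunk. A minimal, self-contained sketch of just that size-splitting step (the class and method names are illustrative and not part of the removed code):

import java.util.ArrayList;
import java.util.List;

public final class BlockSplitSketch {
  private BlockSplitSketch() {
  }

  // Mirrors the while-loop in the removed openKey(): each entry is the
  // length of one block to request from SCM, capped at the SCM block size.
  static List<Long> splitIntoBlocks(long requestedSize, long scmBlockSize) {
    List<Long> chunkSizes = new ArrayList<>();
    while (requestedSize > 0) {
      long allocateSize = Math.min(scmBlockSize, requestedSize);
      chunkSizes.add(allocateSize);
      requestedSize -= allocateSize;
    }
    return chunkSizes;
  }

  public static void main(String[] args) {
    // 600 MB requested with a 256 MB SCM block size splits into
    // 256 MB + 256 MB + 88 MB (printed here as byte counts).
    System.out.println(splitIntoBlocks(600L << 20, 256L << 20));
  }
}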

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
deleted file mode 100644
index 5fa313b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
+++ /dev/null
@@ -1,912 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.protobuf.BlockingService;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
-import org.apache.hadoop.ozone.common.Storage.StorageState;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.ksm.protocol.KeySpaceManagerProtocol;
-import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos
-    .ServicePort;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.protocolPB
-    .KeySpaceManagerProtocolServerSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB
-    .ScmBlockLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
-import org.apache.hadoop.hdds.scm.protocolPB
-    .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.GenericOptionsParser;
-import org.apache.hadoop.util.StringUtils;
-
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.hdds.HddsUtils.isHddsEnabled;
-import static org.apache.hadoop.ozone.KsmUtils.getKsmAddress;
-import static org.apache.hadoop.hdds.server.ServerUtils
-    .updateRPCListenAddress;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_HANDLER_COUNT_DEFAULT;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_HANDLER_COUNT_KEY;
-import static org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.KeySpaceManagerService
-    .newReflectiveBlockingService;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
-    .NodeState.HEALTHY;
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
-/**
- * Ozone KeySpace Manager (KSM) is the metadata manager of Ozone.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
-public final class KeySpaceManager extends ServiceRuntimeInfoImpl
-    implements KeySpaceManagerProtocol, KSMMXBean {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(KeySpaceManager.class);
-
-  private static final String USAGE =
-      "Usage: \n ozone ksm [genericOptions] " + "[ "
-          + StartupOption.CREATEOBJECTSTORE.getName() + " ]\n " + "ozone ksm [ "
-          + StartupOption.HELP.getName() + " ]\n";
-
-  /** Startup options. */
-  public enum StartupOption {
-    CREATEOBJECTSTORE("-createObjectStore"),
-    HELP("-help"),
-    REGULAR("-regular");
-
-    private final String name;
-
-    StartupOption(String arg) {
-      this.name = arg;
-    }
-
-    public String getName() {
-      return name;
-    }
-
-    public static StartupOption parse(String value) {
-      for (StartupOption option : StartupOption.values()) {
-        if (option.name.equalsIgnoreCase(value)) {
-          return option;
-        }
-      }
-      return null;
-    }
-  }
-
-  private final OzoneConfiguration configuration;
-  private final RPC.Server ksmRpcServer;
-  private final InetSocketAddress ksmRpcAddress;
-  private final KSMMetadataManager metadataManager;
-  private final VolumeManager volumeManager;
-  private final BucketManager bucketManager;
-  private final KeyManager keyManager;
-  private final KSMMetrics metrics;
-  private final KeySpaceManagerHttpServer httpServer;
-  private final KSMStorage ksmStorage;
-  private final ScmBlockLocationProtocol scmBlockClient;
-  private final StorageContainerLocationProtocol scmContainerClient;
-  private ObjectName ksmInfoBeanName;
-
-  private KeySpaceManager(OzoneConfiguration conf) throws IOException {
-    Preconditions.checkNotNull(conf);
-    configuration = conf;
-    ksmStorage = new KSMStorage(conf);
-    scmBlockClient = getScmBlockClient(configuration);
-    scmContainerClient = getScmContainerClient(configuration);
-    if (ksmStorage.getState() != StorageState.INITIALIZED) {
-      throw new KSMException("KSM not initialized.",
-          ResultCodes.KSM_NOT_INITIALIZED);
-    }
-
-    // verifies that the SCM info in the KSM Version file is correct.
-    ScmInfo scmInfo = scmBlockClient.getScmInfo();
-    if (!(scmInfo.getClusterId().equals(ksmStorage.getClusterID()) && scmInfo
-        .getScmId().equals(ksmStorage.getScmId()))) {
-      throw new KSMException("SCM version info mismatch.",
-          ResultCodes.SCM_VERSION_MISMATCH_ERROR);
-    }
-    final int handlerCount = conf.getInt(OZONE_KSM_HANDLER_COUNT_KEY,
-        OZONE_KSM_HANDLER_COUNT_DEFAULT);
-
-    RPC.setProtocolEngine(configuration, KeySpaceManagerProtocolPB.class,
-        ProtobufRpcEngine.class);
-
-    BlockingService ksmService = newReflectiveBlockingService(
-        new KeySpaceManagerProtocolServerSideTranslatorPB(this));
-    final InetSocketAddress ksmNodeRpcAddr =
-        getKsmAddress(configuration);
-    ksmRpcServer = startRpcServer(configuration, ksmNodeRpcAddr,
-        KeySpaceManagerProtocolPB.class, ksmService,
-        handlerCount);
-    ksmRpcAddress = updateRPCListenAddress(configuration,
-        OZONE_KSM_ADDRESS_KEY, ksmNodeRpcAddr, ksmRpcServer);
-    metadataManager = new KSMMetadataManagerImpl(configuration);
-    volumeManager = new VolumeManagerImpl(metadataManager, configuration);
-    bucketManager = new BucketManagerImpl(metadataManager);
-    metrics = KSMMetrics.create();
-    keyManager =
-        new KeyManagerImpl(scmBlockClient, metadataManager, configuration,
-            ksmStorage.getKsmId());
-    httpServer = new KeySpaceManagerHttpServer(configuration, this);
-  }
-
-  /**
-   * Creates an SCM block client, used by putKey() and getKey().
-   *
-   * @return {@link ScmBlockLocationProtocol}
-   * @throws IOException
-   */
-  private static ScmBlockLocationProtocol getScmBlockClient(
-      OzoneConfiguration conf) throws IOException {
-    RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long scmVersion =
-        RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
-    InetSocketAddress scmBlockAddress =
-        getScmAddressForBlockClients(conf);
-    ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient =
-        new ScmBlockLocationProtocolClientSideTranslatorPB(
-            RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
-                scmBlockAddress, UserGroupInformation.getCurrentUser(), conf,
-                NetUtils.getDefaultSocketFactory(conf),
-                Client.getRpcTimeout(conf)));
-    return scmBlockLocationClient;
-  }
-
-  /**
-   * Returns an SCM container client.
-   *
-   * @return {@link StorageContainerLocationProtocol}
-   * @throws IOException
-   */
-  private static StorageContainerLocationProtocol getScmContainerClient(
-      OzoneConfiguration conf) throws IOException {
-    RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long scmVersion =
-        RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
-    InetSocketAddress scmAddr = getScmAddressForClients(
-        conf);
-    StorageContainerLocationProtocolClientSideTranslatorPB scmContainerClient =
-        new StorageContainerLocationProtocolClientSideTranslatorPB(
-            RPC.getProxy(StorageContainerLocationProtocolPB.class, scmVersion,
-                scmAddr, UserGroupInformation.getCurrentUser(), conf,
-                NetUtils.getDefaultSocketFactory(conf),
-                Client.getRpcTimeout(conf)));
-    return scmContainerClient;
-  }
-
-  @VisibleForTesting
-  public KeyManager getKeyManager() {
-    return keyManager;
-  }
-
-  @VisibleForTesting
-  public ScmInfo getScmInfo() throws IOException {
-    return scmBlockClient.getScmInfo();
-  }
-
-  @VisibleForTesting
-  public KSMStorage getKsmStorage() {
-    return ksmStorage;
-  }
-  /**
-   * Starts an RPC server, if configured.
-   *
-   * @param conf configuration
-   * @param addr configured address of RPC server
-   * @param protocol RPC protocol provided by RPC server
-   * @param instance RPC protocol implementation instance
-   * @param handlerCount RPC server handler count
-   *
-   * @return RPC server
-   * @throws IOException if there is an I/O error while creating RPC server
-   */
-  private static RPC.Server startRpcServer(OzoneConfiguration conf,
-      InetSocketAddress addr, Class<?> protocol, BlockingService instance,
-      int handlerCount) throws IOException {
-    RPC.Server rpcServer = new RPC.Builder(conf)
-        .setProtocol(protocol)
-        .setInstance(instance)
-        .setBindAddress(addr.getHostString())
-        .setPort(addr.getPort())
-        .setNumHandlers(handlerCount)
-        .setVerbose(false)
-        .setSecretManager(null)
-        .build();
-
-    DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);
-    return rpcServer;
-  }
-
-  /**
-   * Get metadata manager.
-   * @return metadata manager.
-   */
-  public KSMMetadataManager getMetadataManager() {
-    return metadataManager;
-  }
-
-  public KSMMetrics getMetrics() {
-    return metrics;
-  }
-
-  /**
-   * Main entry point for starting KeySpaceManager.
-   *
-   * @param argv arguments
-   * @throws IOException if startup fails due to I/O error
-   */
-  public static void main(String[] argv) throws IOException {
-    if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
-      System.exit(0);
-    }
-    try {
-      OzoneConfiguration conf = new OzoneConfiguration();
-      GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
-      if (!hParser.isParseSuccessful()) {
-        System.err.println("USAGE: " + USAGE + " \n");
-        hParser.printGenericCommandUsage(System.err);
-        System.exit(1);
-      }
-      StringUtils.startupShutdownMessage(KeySpaceManager.class, argv, LOG);
-      KeySpaceManager ksm = createKSM(hParser.getRemainingArgs(), conf);
-      if (ksm != null) {
-        ksm.start();
-        ksm.join();
-      }
-    } catch (Throwable t) {
-      LOG.error("Failed to start the KeyspaceManager.", t);
-      terminate(1, t);
-    }
-  }
-
-  private static void printUsage(PrintStream out) {
-    out.println(USAGE + "\n");
-  }
-
-  /**
-   * Constructs KSM instance based on command line arguments.
-   * @param argv Command line arguments
-   * @param conf OzoneConfiguration
-   * @return KSM instance
-   * @throws IOException in case KSM instance creation fails.
-   */
-
-  public static KeySpaceManager createKSM(String[] argv,
-      OzoneConfiguration conf) throws IOException {
-    if (!isHddsEnabled(conf)) {
-      System.err.println("KSM cannot be started in secure mode or when " +
-          OZONE_ENABLED + " is set to false");
-      System.exit(1);
-    }
-    StartupOption startOpt = parseArguments(argv);
-    if (startOpt == null) {
-      printUsage(System.err);
-      terminate(1);
-      return null;
-    }
-    switch (startOpt) {
-    case CREATEOBJECTSTORE:
-      terminate(ksmInit(conf) ? 0 : 1);
-      return null;
-    case HELP:
-      printUsage(System.err);
-      terminate(0);
-      return null;
-    default:
-      return new KeySpaceManager(conf);
-    }
-  }
-
-  /**
-   * Initializes the KSM instance.
-   * @param conf OzoneConfiguration
-   * @return true if KSM initialization succeeds, false otherwise
-   * @throws IOException in case ozone metadata directory path is not accessible
-   */
-
-  private static boolean ksmInit(OzoneConfiguration conf) throws IOException {
-    KSMStorage ksmStorage = new KSMStorage(conf);
-    StorageState state = ksmStorage.getState();
-    if (state != StorageState.INITIALIZED) {
-      try {
-        ScmBlockLocationProtocol scmBlockClient = getScmBlockClient(conf);
-        ScmInfo scmInfo = scmBlockClient.getScmInfo();
-        String clusterId = scmInfo.getClusterId();
-        String scmId = scmInfo.getScmId();
-        if (clusterId == null || clusterId.isEmpty()) {
-          throw new IOException("Invalid Cluster ID");
-        }
-        if (scmId == null || scmId.isEmpty()) {
-          throw new IOException("Invalid SCM ID");
-        }
-        ksmStorage.setClusterId(clusterId);
-        ksmStorage.setScmId(scmId);
-        ksmStorage.initialize();
-        System.out.println(
-            "KSM initialization succeeded.Current cluster id for sd="
-                + ksmStorage.getStorageDir() + ";cid=" + ksmStorage
-                .getClusterID());
-        return true;
-      } catch (IOException ioe) {
-        LOG.error("Could not initialize KSM version file", ioe);
-        return false;
-      }
-    } else {
-      System.out.println(
-          "KSM already initialized.Reusing existing cluster id for sd="
-              + ksmStorage.getStorageDir() + ";cid=" + ksmStorage
-              .getClusterID());
-      return true;
-    }
-  }
-
-  /**
-   * Parses the command line options for KSM initialization.
-   * @param args command line arguments
-   * @return StartupOption if options are valid, null otherwise
-   */
-  private static StartupOption parseArguments(String[] args) {
-    if (args == null || args.length == 0) {
-      return StartupOption.REGULAR;
-    } else if (args.length == 1) {
-      return StartupOption.parse(args[0]);
-    }
-    return null;
-  }
-
-  /**
-   * Builds a message for logging startup information about an RPC server.
-   *
-   * @param description RPC server description
-   * @param addr RPC server listening address
-   * @return server startup message
-   */
-  private static String buildRpcServerStartMessage(String description,
-      InetSocketAddress addr) {
-    return addr != null ? String.format("%s is listening at %s",
-        description, addr.toString()) :
-        String.format("%s not started", description);
-  }
-
-  /**
-   * Start service.
-   */
-  public void start() throws IOException {
-    LOG.info(buildRpcServerStartMessage("KeyspaceManager RPC server",
-        ksmRpcAddress));
-    DefaultMetricsSystem.initialize("KeySpaceManager");
-    metadataManager.start();
-    keyManager.start();
-    ksmRpcServer.start();
-    httpServer.start();
-    registerMXBean();
-    setStartTime();
-  }
-
-  /**
-   * Stop service.
-   */
-  public void stop() {
-    try {
-      metadataManager.stop();
-      ksmRpcServer.stop();
-      keyManager.stop();
-      httpServer.stop();
-      metrics.unRegister();
-      unregisterMXBean();
-    } catch (Exception e) {
-      LOG.error("Key Space Manager stop failed.", e);
-    }
-  }
-
-  /**
-   * Wait until service has completed shutdown.
-   */
-  public void join() {
-    try {
-      ksmRpcServer.join();
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      LOG.info("Interrupted during KeyspaceManager join.", e);
-    }
-  }
-
-  /**
-   * Creates a volume.
-   *
-   * @param args - Arguments to create Volume.
-   * @throws IOException
-   */
-  @Override
-  public void createVolume(KsmVolumeArgs args) throws IOException {
-    try {
-      metrics.incNumVolumeCreates();
-      volumeManager.createVolume(args);
-    } catch (Exception ex) {
-      metrics.incNumVolumeCreateFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Changes the owner of a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  @Override
-  public void setOwner(String volume, String owner) throws IOException {
-    try {
-      metrics.incNumVolumeUpdates();
-      volumeManager.setOwner(volume, owner);
-    } catch (Exception ex) {
-      metrics.incNumVolumeUpdateFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Changes the Quota on a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   * @throws IOException
-   */
-  @Override
-  public void setQuota(String volume, long quota) throws IOException {
-    try {
-      metrics.incNumVolumeUpdates();
-      volumeManager.setQuota(volume, quota);
-    } catch (Exception ex) {
-      metrics.incNumVolumeUpdateFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Checks if the specified user can access this volume.
-   *
-   * @param volume - volume
-   * @param userAcl - user acls which needs to be checked for access
-   * @return true if the user has required access for the volume,
-   *         false otherwise
-   * @throws IOException
-   */
-  @Override
-  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
-      throws IOException {
-    try {
-      metrics.incNumVolumeCheckAccesses();
-      return volumeManager.checkVolumeAccess(volume, userAcl);
-    } catch (Exception ex) {
-      metrics.incNumVolumeCheckAccessFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Gets the volume information.
-   *
-   * @param volume - Volume name.
-   * @return VolumeArgs for the volume; an exception is thrown on error.
-   * @throws IOException
-   */
-  @Override
-  public KsmVolumeArgs getVolumeInfo(String volume) throws IOException {
-    try {
-      metrics.incNumVolumeInfos();
-      return volumeManager.getVolumeInfo(volume);
-    } catch (Exception ex) {
-      metrics.incNumVolumeInfoFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Deletes an existing empty volume.
-   *
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  @Override
-  public void deleteVolume(String volume) throws IOException {
-    try {
-      metrics.incNumVolumeDeletes();
-      volumeManager.deleteVolume(volume);
-    } catch (Exception ex) {
-      metrics.incNumVolumeDeleteFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Lists volumes owned by a specific user.
-   *
-   * @param userName - user name
-   * @param prefix - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- listing starts from the key after
-   * prevKey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  @Override
-  public List<KsmVolumeArgs> listVolumeByUser(String userName, String prefix,
-      String prevKey, int maxKeys) throws IOException {
-    try {
-      metrics.incNumVolumeLists();
-      return volumeManager.listVolumes(userName, prefix, prevKey, maxKeys);
-    } catch (Exception ex) {
-      metrics.incNumVolumeListFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Lists all volumes in the cluster.
-   *
-   * @param prefix - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- listing starts from the key after
-   * prevKey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  @Override
-  public List<KsmVolumeArgs> listAllVolumes(String prefix, String prevKey, int
-      maxKeys) throws IOException {
-    try {
-      metrics.incNumVolumeLists();
-      return volumeManager.listVolumes(null, prefix, prevKey, maxKeys);
-    } catch (Exception ex) {
-      metrics.incNumVolumeListFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Creates a bucket.
-   *
-   * @param bucketInfo - BucketInfo to create bucket.
-   * @throws IOException
-   */
-  @Override
-  public void createBucket(KsmBucketInfo bucketInfo) throws IOException {
-    try {
-      metrics.incNumBucketCreates();
-      bucketManager.createBucket(bucketInfo);
-    } catch (Exception ex) {
-      metrics.incNumBucketCreateFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<KsmBucketInfo> listBuckets(String volumeName,
-      String startKey, String prefix, int maxNumOfBuckets)
-      throws IOException {
-    try {
-      metrics.incNumBucketLists();
-      return bucketManager.listBuckets(volumeName,
-          startKey, prefix, maxNumOfBuckets);
-    } catch (IOException ex) {
-      metrics.incNumBucketListFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Gets the bucket information.
-   *
-   * @param volume - Volume name.
-   * @param bucket - Bucket name.
-   * @return KsmBucketInfo for the bucket; an exception is thrown on error.
-   * @throws IOException
-   */
-  @Override
-  public KsmBucketInfo getBucketInfo(String volume, String bucket)
-      throws IOException {
-    try {
-      metrics.incNumBucketInfos();
-      return bucketManager.getBucketInfo(volume, bucket);
-    } catch (Exception ex) {
-      metrics.incNumBucketInfoFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Allocate a key.
-   *
-   * @param args - attributes of the key.
-   * @return OpenKeySession - the open session for the allocated key.
-   * @throws IOException
-   */
-  @Override
-  public OpenKeySession openKey(KsmKeyArgs args) throws IOException {
-    try {
-      metrics.incNumKeyAllocates();
-      return keyManager.openKey(args);
-    } catch (Exception ex) {
-      metrics.incNumKeyAllocateFails();
-      throw ex;
-    }
-  }
-
-  @Override
-  public void commitKey(KsmKeyArgs args, int clientID)
-      throws IOException {
-    try {
-      metrics.incNumKeyCommits();
-      keyManager.commitKey(args, clientID);
-    } catch (Exception ex) {
-      metrics.incNumKeyCommitFails();
-      throw ex;
-    }
-  }
-
-  @Override
-  public KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID)
-      throws IOException {
-    try {
-      metrics.incNumBlockAllocateCalls();
-      return keyManager.allocateBlock(args, clientID);
-    } catch (Exception ex) {
-      metrics.incNumBlockAllocateCallFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Lookup a key.
-   *
-   * @param args - attributes of the key.
-   * @return KsmKeyInfo - the info about the requested key.
-   * @throws IOException
-   */
-  @Override
-  public KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException {
-    try {
-      metrics.incNumKeyLookups();
-      return keyManager.lookupKey(args);
-    } catch (Exception ex) {
-      metrics.incNumKeyLookupFails();
-      throw ex;
-    }
-  }
-
-  @Override
-  public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException {
-    try {
-      metrics.incNumKeyRenames();
-      keyManager.renameKey(args, toKeyName);
-    } catch (IOException e) {
-      metrics.incNumKeyRenameFails();
-      throw e;
-    }
-  }
-
-  /**
-   * Deletes an existing key.
-   *
-   * @param args - attributes of the key.
-   * @throws IOException
-   */
-  @Override
-  public void deleteKey(KsmKeyArgs args) throws IOException {
-    try {
-      metrics.incNumKeyDeletes();
-      keyManager.deleteKey(args);
-    } catch (Exception ex) {
-      metrics.incNumKeyDeleteFails();
-      throw ex;
-    }
-  }
-
-  @Override
-  public List<KsmKeyInfo> listKeys(String volumeName, String bucketName,
-      String startKey, String keyPrefix, int maxKeys) throws IOException {
-    try {
-      metrics.incNumKeyLists();
-      return keyManager.listKeys(volumeName, bucketName,
-          startKey, keyPrefix, maxKeys);
-    } catch (IOException ex) {
-      metrics.incNumKeyListFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Sets bucket property from args.
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  @Override
-  public void setBucketProperty(KsmBucketArgs args)
-      throws IOException {
-    try {
-      metrics.incNumBucketUpdates();
-      bucketManager.setBucketProperty(args);
-    } catch (Exception ex) {
-      metrics.incNumBucketUpdateFails();
-      throw ex;
-    }
-  }
-
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   * @param volume - Name of the volume.
-   * @param bucket - Name of the bucket.
-   * @throws IOException
-   */
-  public void deleteBucket(String volume, String bucket) throws IOException {
-    try {
-      metrics.incNumBucketDeletes();
-      bucketManager.deleteBucket(volume, bucket);
-    } catch (Exception ex) {
-      metrics.incNumBucketDeleteFails();
-      throw ex;
-    }
-  }
-
-  private void registerMXBean() {
-    Map<String, String> jmxProperties = new HashMap<String, String>();
-    jmxProperties.put("component", "ServerRuntime");
-    this.ksmInfoBeanName =
-        MBeans.register("KeySpaceManager",
-            "KeySpaceManagerInfo",
-            jmxProperties,
-            this);
-  }
-
-  private void unregisterMXBean() {
-    if (this.ksmInfoBeanName != null) {
-      MBeans.unregister(this.ksmInfoBeanName);
-      this.ksmInfoBeanName = null;
-    }
-  }
-
-  @Override
-  public String getRpcPort() {
-    return "" + ksmRpcAddress.getPort();
-  }
-
-  @VisibleForTesting
-  public KeySpaceManagerHttpServer getHttpServer() {
-    return httpServer;
-  }
-
-  @Override
-  public List<ServiceInfo> getServiceList() throws IOException {
-    // When we implement multi-home this call has to be handled properly.
-    List<ServiceInfo> services = new ArrayList<>();
-    ServiceInfo.Builder ksmServiceInfoBuilder = ServiceInfo.newBuilder()
-        .setNodeType(HddsProtos.NodeType.KSM)
-        .setHostname(ksmRpcAddress.getHostName())
-        .addServicePort(ServicePort.newBuilder()
-                .setType(ServicePort.Type.RPC)
-                .setValue(ksmRpcAddress.getPort())
-            .build());
-    if (httpServer.getHttpAddress() != null) {
-      ksmServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
-          .setType(ServicePort.Type.HTTP)
-          .setValue(httpServer.getHttpAddress().getPort())
-          .build());
-    }
-    if (httpServer.getHttpsAddress() != null) {
-      ksmServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
-          .setType(ServicePort.Type.HTTPS)
-          .setValue(httpServer.getHttpsAddress().getPort())
-          .build());
-    }
-    services.add(ksmServiceInfoBuilder.build());
-
-    // For clients we have to return SCM's container protocol port,
-    // not the block protocol port.
-    InetSocketAddress scmAddr = getScmAddressForClients(
-        configuration);
-    ServiceInfo.Builder scmServiceInfoBuilder = ServiceInfo.newBuilder()
-        .setNodeType(HddsProtos.NodeType.SCM)
-        .setHostname(scmAddr.getHostName())
-        .addServicePort(ServicePort.newBuilder()
-            .setType(ServicePort.Type.RPC)
-            .setValue(scmAddr.getPort()).build());
-    services.add(scmServiceInfoBuilder.build());
-
-    List<HddsProtos.Node> nodes = scmContainerClient.queryNode(HEALTHY,
-        HddsProtos.QueryScope.CLUSTER, "");
-
-    for (HddsProtos.Node node : nodes) {
-      HddsProtos.DatanodeDetailsProto datanode = node.getNodeID();
-
-      ServiceInfo.Builder dnServiceInfoBuilder = ServiceInfo.newBuilder()
-          .setNodeType(HddsProtos.NodeType.DATANODE)
-          .setHostname(datanode.getHostName());
-
-      dnServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
-          .setType(ServicePort.Type.HTTP)
-          .setValue(DatanodeDetails.getFromProtoBuf(datanode)
-              .getPort(DatanodeDetails.Port.Name.REST).getValue())
-          .build());
-
-      services.add(dnServiceInfoBuilder.build());
-    }
-
-    metrics.incNumGetServiceLists();
-    // For now there is no exception that can happen in this call,
-    // so failure metrics are not handled. In the future, if there is any
-    // need to handle exceptions in this method, we need to incorporate
-    // metrics.incNumGetServiceListFails()
-    return services;
-  }
-}
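
Every public method of the removed KeySpaceManager above follows the same shape: increment an attempt counter, delegate to the underlying manager, and increment a failure counter before rethrowing on error. A hedged sketch of that wrapper pattern in isolation (the class below is illustrative only; the real KSMMetrics counters are per-operation rather than a single pair):

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicLong;

public final class MetricsWrapperSketch {
  private final AtomicLong attempts = new AtomicLong();
  private final AtomicLong failures = new AtomicLong();

  // Same structure as e.g. createVolume()/deleteKey() in the removed class:
  // count the attempt, delegate, and count the failure before rethrowing.
  <T> T withMetrics(Callable<T> delegate) throws IOException {
    attempts.incrementAndGet();
    try {
      return delegate.call();
    } catch (Exception ex) {
      failures.incrementAndGet();
      if (ex instanceof IOException) {
        throw (IOException) ex;
      }
      throw new IOException(ex);
    }
  }
}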

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
deleted file mode 100644
index 478804b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.hdds.server.BaseHttpServer;
-
-import java.io.IOException;
-
-/**
- * HttpServer wrapper for the KeySpaceManager.
- */
-public class KeySpaceManagerHttpServer extends BaseHttpServer {
-
-  public KeySpaceManagerHttpServer(Configuration conf, KeySpaceManager ksm)
-      throws IOException {
-    super(conf, "ksm");
-    addServlet("serviceList", "/serviceList", ServiceListJSONServlet.class);
-    getWebAppContext().setAttribute(OzoneConsts.KSM_CONTEXT_ATTRIBUTE, ksm);
-  }
-
-  @Override protected String getHttpAddressKey() {
-    return KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY;
-  }
-
-  @Override protected String getHttpBindHostKey() {
-    return KSMConfigKeys.OZONE_KSM_HTTP_BIND_HOST_KEY;
-  }
-
-  @Override protected String getHttpsAddressKey() {
-    return KSMConfigKeys.OZONE_KSM_HTTPS_ADDRESS_KEY;
-  }
-
-  @Override protected String getHttpsBindHostKey() {
-    return KSMConfigKeys.OZONE_KSM_HTTPS_BIND_HOST_KEY;
-  }
-
-  @Override protected String getBindHostDefault() {
-    return KSMConfigKeys.OZONE_KSM_HTTP_BIND_HOST_DEFAULT;
-  }
-
-  @Override protected int getHttpBindPortDefault() {
-    return KSMConfigKeys.OZONE_KSM_HTTP_BIND_PORT_DEFAULT;
-  }
-
-  @Override protected int getHttpsBindPortDefault() {
-    return KSMConfigKeys.OZONE_KSM_HTTPS_BIND_PORT_DEFAULT;
-  }
-
-  @Override protected String getKeytabFile() {
-    return KSMConfigKeys.OZONE_KSM_KEYTAB_FILE;
-  }
-
-  @Override protected String getSpnegoPrincipal() {
-    return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
-  }
-
-  @Override protected String getEnabledKey() {
-    return KSMConfigKeys.OZONE_KSM_HTTP_ENABLED_KEY;
-  }
-}
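
The removed KeySpaceManagerHttpServer above only maps the generic BaseHttpServer hooks onto KSM-specific configuration keys. A hedged sketch of how a caller might read one of those keys directly through the usual Hadoop Configuration API (the fallback bind address below is a made-up example value):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.ksm.KSMConfigKeys;

public final class KsmHttpAddressSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Falls back to an example bind address when the key is unset.
    String httpAddress = conf.getTrimmed(
        KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY, "0.0.0.0:9874");
    System.out.println("KSM HTTP address: " + httpAddress);
  }
}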

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java
deleted file mode 100644
index 8e2540a..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.utils.BackgroundService;
-import org.apache.hadoop.utils.BackgroundTask;
-import org.apache.hadoop.utils.BackgroundTaskQueue;
-import org.apache.hadoop.utils.BackgroundTaskResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-/**
- * This is the background service that deletes hanging open keys.
- * It periodically scans the KSM metadata for keys with the
- * "#open#" prefix and asks SCM to delete the corresponding
- * block metadata; if SCM returns success for a key, that key
- * is then cleaned up.
- */
-public class OpenKeyCleanupService extends BackgroundService {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OpenKeyCleanupService.class);
-
-  private final static int OPEN_KEY_DELETING_CORE_POOL_SIZE = 2;
-
-  private final KeyManager keyManager;
-  private final ScmBlockLocationProtocol scmClient;
-
-  public OpenKeyCleanupService(ScmBlockLocationProtocol scmClient,
-      KeyManager keyManager, int serviceInterval,
-      long serviceTimeout) {
-    super("OpenKeyCleanupService", serviceInterval, TimeUnit.SECONDS,
-        OPEN_KEY_DELETING_CORE_POOL_SIZE, serviceTimeout);
-    this.keyManager = keyManager;
-    this.scmClient = scmClient;
-  }
-
-  @Override
-  public BackgroundTaskQueue getTasks() {
-    BackgroundTaskQueue queue = new BackgroundTaskQueue();
-    queue.add(new OpenKeyDeletingTask());
-    return queue;
-  }
-
-  private class OpenKeyDeletingTask
-      implements BackgroundTask<BackgroundTaskResult> {
-
-    @Override
-    public int getPriority() {
-      return 0;
-    }
-
-    @Override
-    public BackgroundTaskResult call() throws Exception {
-      try {
-        List<BlockGroup> keyBlocksList = keyManager.getExpiredOpenKeys();
-        if (keyBlocksList.size() > 0) {
-          int toDeleteSize = keyBlocksList.size();
-          LOG.debug("Found {} to-delete open keys in KSM", toDeleteSize);
-          List<DeleteBlockGroupResult> results =
-              scmClient.deleteKeyBlocks(keyBlocksList);
-          int deletedSize = 0;
-          for (DeleteBlockGroupResult result : results) {
-            if (result.isSuccess()) {
-              try {
-                keyManager.deleteExpiredOpenKey(result.getObjectKey());
-                LOG.debug("Key {} deleted from KSM DB", result.getObjectKey());
-                deletedSize += 1;
-              } catch (IOException e) {
-                LOG.warn("Failed to delete hanging-open key {}",
-                    result.getObjectKey(), e);
-              }
-            } else {
-              LOG.warn("Deleting open Key {} failed because some of the blocks"
-                      + " were failed to delete, failed blocks: {}",
-                  result.getObjectKey(),
-                  StringUtils.join(",", result.getFailedBlocks()));
-            }
-          }
-          LOG.info("Found {} expired open key entries, successfully " +
-              "cleaned up {} entries", toDeleteSize, deletedSize);
-          return results::size;
-        } else {
-          LOG.debug("No hanging open key fond in KSM");
-        }
-      } catch (IOException e) {
-        LOG.error("Unable to get hanging open keys, retry in"
-            + " next interval", e);
-      }
-      return BackgroundTaskResult.EmptyTaskResult.newResult();
-    }
-  }
-}
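
The removed OpenKeyCleanupService above is a thin BackgroundService: its single task fetches expired open keys, asks SCM to delete their blocks, and removes only the successfully deleted entries from the KSM DB. For reference, here is a minimal do-nothing task sketch using only the interfaces visible in the file above; it is illustrative and not part of the codebase:

import org.apache.hadoop.utils.BackgroundTask;
import org.apache.hadoop.utils.BackgroundTaskResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Shaped like the removed OpenKeyDeletingTask: same interface, same
// priority, and the same empty result when there is no work to do.
public class NoOpCleanupTaskSketch
    implements BackgroundTask<BackgroundTaskResult> {

  private static final Logger LOG =
      LoggerFactory.getLogger(NoOpCleanupTaskSketch.class);

  @Override
  public int getPriority() {
    return 0;
  }

  @Override
  public BackgroundTaskResult call() throws Exception {
    LOG.debug("Nothing to clean up in this interval");
    return BackgroundTaskResult.EmptyTaskResult.newResult();
  }
}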

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/ServiceListJSONServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/ServiceListJSONServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/ServiceListJSONServlet.java
deleted file mode 100644
index 34a80ce..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/ServiceListJSONServlet.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.SerializationFeature;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-import java.io.PrintWriter;
-
-
-/**
- * Provides REST access to Ozone Service List.
- * <p>
- * This servlet will generally be placed under the /serviceList URL of
- * the KeySpaceManager HttpServer.
- *
- * The response format is JSON, in the form
- * <p>
- *  <code><pre>
- *  {
- *    "services" : [
- *      {
- *        "NodeType":"KSM",
- *        "Hostname" "$hostname",
- *        "ports" : {
- *          "$PortType" : "$port",
- *          ...
- *        }
- *      }
- *    ]
- *  }
- *  </pre></code>
- *  <p>
- *
- */
-public class ServiceListJSONServlet  extends HttpServlet  {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ServiceListJSONServlet.class);
-  private static final long serialVersionUID = 1L;
-
-  private KeySpaceManager ksm;
-
-  public void init() throws ServletException {
-    this.ksm = (KeySpaceManager) getServletContext()
-        .getAttribute(OzoneConsts.KSM_CONTEXT_ATTRIBUTE);
-  }
-
-  /**
-   * Process a GET request for the specified resource.
-   *
-   * @param request
-   *          The servlet request we are processing
-   * @param response
-   *          The servlet response we are creating
-   */
-  @Override
-  public void doGet(HttpServletRequest request, HttpServletResponse response) {
-    try {
-      ObjectMapper objectMapper = new ObjectMapper();
-      objectMapper.enable(SerializationFeature.INDENT_OUTPUT);
-      response.setContentType("application/json; charset=utf8");
-      PrintWriter writer = response.getWriter();
-      try {
-        writer.write(objectMapper.writeValueAsString(ksm.getServiceList()));
-      } finally {
-        if (writer != null) {
-          writer.close();
-        }
-      }
-    } catch (IOException e) {
-      LOG.error(
-          "Caught an exception while processing ServiceList request", e);
-      response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
-    }
-  }
-
-}
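
The removed ServiceListJSONServlet above is essentially a thin Jackson wrapper around ksm.getServiceList(). On its own, the serialization step looks roughly like the following standalone sketch; the hostname and port are made-up example values and the map stands in for the real ServiceInfo objects:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public final class ServiceListJsonSketch {
  public static void main(String[] args) throws Exception {
    // Same ObjectMapper configuration as the removed doGet(): indented output.
    ObjectMapper objectMapper = new ObjectMapper();
    objectMapper.enable(SerializationFeature.INDENT_OUTPUT);

    Map<String, Object> service = new LinkedHashMap<>();
    service.put("NodeType", "KSM");
    service.put("Hostname", "ksm.example.com");
    service.put("ports", Collections.singletonMap("RPC", 9862));

    System.out.println(objectMapper.writeValueAsString(service));
  }
}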

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManager.java
deleted file mode 100644
index 6ac78d6..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManager.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * KSM volume manager interface.
- */
-public interface VolumeManager {
-
-  /**
-   * Create a new volume.
-   * @param args - Volume args to create a volume
-   */
-  void createVolume(KsmVolumeArgs args) throws IOException;
-
-  /**
-   * Changes the owner of a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  void setOwner(String volume, String owner) throws IOException;
-
-  /**
-   * Changes the Quota on a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   * @throws IOException
-   */
-  void setQuota(String volume, long quota) throws IOException;
-
-  /**
-   * Gets the volume information.
-   * @param volume - Volume name.
-   * @return VolumeArgs for the volume; an exception is thrown on error.
-   * @throws IOException
-   */
-  KsmVolumeArgs getVolumeInfo(String volume) throws IOException;
-
-  /**
-   * Deletes an existing empty volume.
-   *
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  void deleteVolume(String volume) throws IOException;
-
-  /**
-   * Checks if the specified user with a role can access this volume.
-   *
-   * @param volume - volume
-   * @param userAcl - user acl which needs to be checked for access
-   * @return true if the user has access for the volume, false otherwise
-   * @throws IOException
-   */
-  boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
-      throws IOException;
-
-  /**
-   * Returns a list of volumes owned by a given user; if user is null,
-   * returns all volumes.
-   *
-   * @param userName
-   *   volume owner
-   * @param prefix
-   *   the volume prefix used to filter the listing result.
-   * @param startKey
-   *   the start volume name determines where to start listing from,
-   *   this key is excluded from the result.
-   * @param maxKeys
-   *   the maximum number of volumes to return.
-   * @return a list of {@link KsmVolumeArgs}
-   * @throws IOException
-   */
-  List<KsmVolumeArgs> listVolumes(String userName, String prefix,
-      String startKey, int maxKeys) throws IOException;
-}




[08/50] [abbrv] hadoop git commit: YARN-7556. Fair scheduler configuration should allow resource types in the minResources and maxResources properties. (Daniel Templeton and Szilard Nemeth via Haibo Chen)

Posted by bh...@apache.org.
YARN-7556. Fair scheduler configuration should allow resource types in the minResources and maxResources properties. (Daniel Templeton and Szilard Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9edc74f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9edc74f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9edc74f6

Branch: refs/heads/HDDS-48
Commit: 9edc74f64a31450af3c55c0dadf352862e4b359d
Parents: 39ad989
Author: Haibo Chen <ha...@apache.org>
Authored: Thu Jul 5 10:42:39 2018 -0700
Committer: Sunil G <su...@apache.org>
Committed: Fri Jul 6 11:03:48 2018 -0700

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            |  17 +-
 .../hadoop/yarn/api/records/Resource.java       |  13 ++
 .../api/records/impl/LightWeightResource.java   |  23 ++-
 .../scheduler/fair/ConfigurableResource.java    |  69 +++++++-
 .../fair/FairSchedulerConfiguration.java        | 174 ++++++++++++++++---
 .../allocation/AllocationFileQueueParser.java   |   2 +-
 .../fair/TestFairSchedulerConfiguration.java    | 151 ++++++++++++----
 .../src/site/markdown/FairScheduler.md          |   6 +-
 8 files changed, 385 insertions(+), 70 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 5841361..5cc81e5 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -67,11 +67,6 @@
   </Match>
   <Match>
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
-    <Method name="getLocalityStatistics" />
-    <Bug pattern="EI_EXPOSE_REP" />
-  </Match>
-  <Match>
-    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
     <Method name="incNumAllocatedContainers"/>
     <Bug pattern="VO_VOLATILE_INCREMENT" />
   </Match>
@@ -118,6 +113,18 @@
     <Bug pattern="BC_UNCONFIRMED_CAST" />
   </Match>
 
+  <!-- Ignore exposed internal representations -->
+  <Match>
+    <Class name="org.apache.hadoop.yarn.api.records.Resource" />
+    <Method name="getResources" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
+    <Method name="getLocalityStatistics" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+
   <!-- Object cast is based on the event type -->
   <Match>
     <Class name="org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher" />

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 71a6b54..173d4c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.impl.LightWeightResource;
@@ -75,6 +76,18 @@ public abstract class Resource implements Comparable<Resource> {
   @Private
   public static final int VCORES_INDEX = 1;
 
+  /**
+   * Return a new {@link Resource} instance with all resource values
+   * initialized to {@code value}.
+   * @param value the value to use for all resources
+   * @return a new {@link Resource} instance
+   */
+  @Private
+  @Unstable
+  public static Resource newInstance(long value) {
+    return new LightWeightResource(value);
+  }
+
   @Public
   @Stable
   public static Resource newInstance(int memory, int vCores) {

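For context only, not part of the patch: a minimal sketch of how the new single-argument factory behaves, assuming the resource types have already been initialized from the RM configuration.

    import org.apache.hadoop.yarn.api.records.Resource;

    public class SingleValueResourceSketch {
      public static void main(String[] args) {
        // Every known resource type (memory-mb, vcores and any custom types)
        // gets the same value; the patch uses Long.MAX_VALUE to mean
        // "unlimited unless explicitly configured".
        Resource allMax = Resource.newInstance(Long.MAX_VALUE);
        System.out.println(allMax);
      }
    }
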
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
index a6e6432..77f77f3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
@@ -18,9 +18,8 @@
 
 package org.apache.hadoop.yarn.api.records.impl;
 
-import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -58,13 +57,29 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.*;
  *
  * @see Resource
  */
-@InterfaceAudience.Private
+@Private
 @Unstable
 public class LightWeightResource extends Resource {
 
   private ResourceInformation memoryResInfo;
   private ResourceInformation vcoresResInfo;
 
+  /**
+   * Create a new {@link LightWeightResource} instance with all resource values
+   * initialized to {@code value}.
+   * @param value the value to use for all resources
+   */
+  public LightWeightResource(long value) {
+    ResourceInformation[] types = ResourceUtils.getResourceTypesArray();
+    initResourceInformations(value, value, types.length);
+
+    for (int i = 2; i < types.length; i++) {
+      resources[i] = new ResourceInformation();
+      ResourceInformation.copy(types[i], resources[i]);
+      resources[i].setValue(value);
+    }
+  }
+
   public LightWeightResource(long memory, int vcores) {
     int numberOfKnownResourceTypes = ResourceUtils
         .getNumberOfKnownResourceTypes();
@@ -91,7 +106,7 @@ public class LightWeightResource extends Resource {
     }
   }
 
-  private void initResourceInformations(long memory, int vcores,
+  private void initResourceInformations(long memory, long vcores,
       int numberOfKnownResourceTypes) {
     this.memoryResInfo = newDefaultInformation(MEMORY_URI, MEMORY_MB.getUnits(),
         memory);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
index ecdd011..0c3b0dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
@@ -18,9 +18,13 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
+import java.util.Arrays;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 
 /**
  * A {@code ConfigurableResource} object represents an entity that is used to
@@ -33,29 +37,53 @@ public class ConfigurableResource {
   private final Resource resource;
   private final double[] percentages;
 
-  public ConfigurableResource(double[] percentages) {
+  ConfigurableResource() {
+    this(getOneHundredPercentArray());
+  }
+
+  ConfigurableResource(double[] percentages) {
     this.percentages = percentages.clone();
     this.resource = null;
   }
 
+  ConfigurableResource(long value) {
+    this(Resource.newInstance(value));
+  }
+
   public ConfigurableResource(Resource resource) {
     this.percentages = null;
     this.resource = resource;
   }
 
+  private static double[] getOneHundredPercentArray() {
+    double[] resourcePercentages =
+        new double[ResourceUtils.getNumberOfKnownResourceTypes()];
+    Arrays.fill(resourcePercentages, 1.0);
+
+    return resourcePercentages;
+  }
+
   /**
    * Get resource by multiplying the cluster resource and the percentage of
    * each resource respectively. Return the absolute resource if either
    * {@code percentages} or {@code clusterResource} is null.
    *
    * @param clusterResource the cluster resource
-   * @return resource
+   * @return the resulting resource
    */
   public Resource getResource(Resource clusterResource) {
     if (percentages != null && clusterResource != null) {
       long memory = (long) (clusterResource.getMemorySize() * percentages[0]);
       int vcore = (int) (clusterResource.getVirtualCores() * percentages[1]);
-      return Resource.newInstance(memory, vcore);
+      Resource res = Resource.newInstance(memory, vcore);
+      ResourceInformation[] clusterInfo = clusterResource.getResources();
+
+      for (int i = 2; i < clusterInfo.length; i++) {
+        res.setResourceValue(i,
+            (long)(clusterInfo[i].getValue() * percentages[i]));
+      }
+
+      return res;
     } else {
       return resource;
     }
@@ -69,4 +97,39 @@ public class ConfigurableResource {
   public Resource getResource() {
     return resource;
   }
+
+  /**
+   * Set the value of the wrapped resource if this object isn't set up to use
+   * percentages. If this object is set to use percentages, this method has
+   * no effect.
+   *
+   * @param name the name of the resource
+   * @param value the value
+   */
+  void setValue(String name, long value) {
+    if (resource != null) {
+      resource.setResourceValue(name, value);
+    }
+  }
+
+  /**
+   * Set the percentage of the resource if this object is set up to use
+   * percentages. If this object is not set up to use percentages, this method
+   * has no effect.
+   *
+   * @param name the name of the resource
+   * @param value the percentage
+   */
+  void setPercentage(String name, double value) {
+    if (percentages != null) {
+      Integer index = ResourceUtils.getResourceTypeIndex().get(name);
+
+      if (index != null) {
+        percentages[index] = value;
+      } else {
+        throw new ResourceNotFoundException("The requested resource, \""
+            + name + "\", could not be found.");
+      }
+    }
+  }
 }

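As a hedged illustration, not part of the patch, of how a percentage-based ConfigurableResource now resolves against the cluster resource. After this change the percentages array holds fractions (0.5 means 50%) and these constructors are package-private, so a sketch like this would live in the scheduler.fair package, e.g. in a test, and it assumes only memory-mb and vcores are configured.

    // 10240 MB and 8 vcores available in the cluster.
    Resource cluster = Resource.newInstance(10 * 1024, 8);
    ConfigurableResource half = new ConfigurableResource(new double[] {0.5, 0.5});
    // Each resource is multiplied by its percentage; any additional resource
    // types in the cluster resource are scaled the same way by getResource().
    Resource resolved = half.getResource(cluster);   // 5120 MB, 4 vcores
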
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index b50e4bb..8c4932b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -213,6 +214,9 @@ public class FairSchedulerConfiguration extends Configuration {
           CONF_PREFIX + "reservable-nodes";
   public static final float RESERVABLE_NODES_DEFAULT = 0.05f;
 
+  private static final String INVALID_RESOURCE_DEFINITION_PREFIX =
+          "Error reading resource config--invalid resource definition: ";
+
   public FairSchedulerConfiguration() {
     super();
   }
@@ -407,54 +411,167 @@ public class FairSchedulerConfiguration extends Configuration {
   }
 
   /**
-   * Parses a resource config value of a form like "1024", "1024 mb",
-   * or "1024 mb, 3 vcores". If no units are given, megabytes are assumed.
-   * 
-   * @throws AllocationConfigurationException
+   * Parses a resource config value in one of three forms:
+   * <ol>
+   * <li>Percentage: &quot;50%&quot; or &quot;40% memory, 60% cpu&quot;</li>
+   * <li>New style resources: &quot;vcores=10, memory-mb=1024&quot;
+   * or &quot;vcores=60%, memory-mb=40%&quot;</li>
+   * <li>Old style resources: &quot;1024 mb, 10 vcores&quot;</li>
+   * </ol>
+   * In new style resources, any resource that is not specified will be
+   * set to {@link Long#MAX_VALUE} or 100%, as appropriate. Also, in the new
+   * style resources, units are not allowed. Units are assumed from the resource
+   * manager's settings for the resources when the value isn't a percentage.
+   *
+   * @param value the resource definition to parse
+   * @return a {@link ConfigurableResource} that represents the parsed value
+   * @throws AllocationConfigurationException if the raw value is not a valid
+   * resource definition
    */
-  public static ConfigurableResource parseResourceConfigValue(String val)
+  public static ConfigurableResource parseResourceConfigValue(String value)
       throws AllocationConfigurationException {
+    return parseResourceConfigValue(value, Long.MAX_VALUE);
+  }
+
+  /**
+   * Parses a resource config value in one of three forms:
+   * <ol>
+   * <li>Percentage: &quot;50%&quot; or &quot;40% memory, 60% cpu&quot;</li>
+   * <li>New style resources: &quot;vcores=10, memory-mb=1024&quot;
+   * or &quot;vcores=60%, memory-mb=40%&quot;</li>
+   * <li>Old style resources: &quot;1024 mb, 10 vcores&quot;</li>
+   * </ol>
+   * In new style resources, any resource that is not specified will be
+   * set to {@code missing} or 0%, as appropriate. Also, in the new style
+   * resources, units are not allowed. Units are assumed from the resource
+   * manager's settings for the resources when the value isn't a percentage.
+   *
+   * The {@code missing} parameter is only used in the case of new style
+   * resources without percentages. With new style resources with percentages,
+   * any missing resources will be assumed to be 100% because percentages are
+   * only used with maximum resource limits.
+   *
+   * @param value the resource definition to parse
+   * @param missing the value to use for any unspecified resources
+   * @return a {@link ConfigurableResource} that represents the parsed value
+   * @throws AllocationConfigurationException if the raw value is not a valid
+   * resource definition
+   */
+  public static ConfigurableResource parseResourceConfigValue(String value,
+      long missing) throws AllocationConfigurationException {
     ConfigurableResource configurableResource;
+
+    if (value.trim().isEmpty()) {
+      throw new AllocationConfigurationException("Error reading resource "
+          + "config--the resource string is empty.");
+    }
+
     try {
-      val = StringUtils.toLowerCase(val);
-      if (val.contains("%")) {
-        configurableResource = new ConfigurableResource(
-            getResourcePercentage(val));
+      if (value.contains("=")) {
+        configurableResource = parseNewStyleResource(value, missing);
+      } else if (value.contains("%")) {
+        configurableResource = parseOldStyleResourceAsPercentage(value);
       } else {
-        int memory = findResource(val, "mb");
-        int vcores = findResource(val, "vcores");
-        configurableResource = new ConfigurableResource(
-            BuilderUtils.newResource(memory, vcores));
+        configurableResource = parseOldStyleResource(value);
       }
-    } catch (AllocationConfigurationException ex) {
-      throw ex;
-    } catch (Exception ex) {
+    } catch (RuntimeException ex) {
       throw new AllocationConfigurationException(
           "Error reading resource config", ex);
     }
+
+    return configurableResource;
+  }
+
+  private static ConfigurableResource parseNewStyleResource(String value,
+          long missing) throws AllocationConfigurationException {
+
+    final ConfigurableResource configurableResource;
+    boolean asPercent = value.contains("%");
+    if (asPercent) {
+      configurableResource = new ConfigurableResource();
+    } else {
+      configurableResource = new ConfigurableResource(missing);
+    }
+
+    String[] resources = value.split(",");
+    for (String resource : resources) {
+      String[] parts = resource.split("=");
+
+      if (parts.length != 2) {
+        throw createConfigException(value,
+                        "Every resource must be of the form: name=value.");
+      }
+
+      String resourceName = parts[0].trim();
+      String resourceValue = parts[1].trim();
+      try {
+        if (asPercent) {
+          configurableResource.setPercentage(resourceName,
+              findPercentage(resourceValue, ""));
+        } else {
+          configurableResource.setValue(resourceName,
+              Long.parseLong(resourceValue));
+        }
+      } catch (ResourceNotFoundException ex) {
+        throw createConfigException(value, "The "
+            + "resource name, \"" + resourceName + "\" was not "
+            + "recognized. Please check the value of "
+            + YarnConfiguration.RESOURCE_TYPES + " in the Resource "
+            + "Manager's configuration files.", ex);
+      } catch (NumberFormatException ex) {
+        // This only comes from Long.parseLong()
+        throw createConfigException(value, "The "
+            + "resource values must all be integers. \"" + resourceValue
+            + "\" is not an integer.", ex);
+      } catch (AllocationConfigurationException ex) {
+        // This only comes from findPercentage()
+        throw createConfigException(value, "The "
+            + "resource values must all be percentages. \""
+            + resourceValue + "\" is either not a number or does not "
+            + "include the '%' symbol.", ex);
+      }
+    }
     return configurableResource;
   }
 
+  private static ConfigurableResource parseOldStyleResourceAsPercentage(
+          String value) throws AllocationConfigurationException {
+    return new ConfigurableResource(
+            getResourcePercentage(StringUtils.toLowerCase(value)));
+  }
+
+  private static ConfigurableResource parseOldStyleResource(String value)
+          throws AllocationConfigurationException {
+    final String lCaseValue = StringUtils.toLowerCase(value);
+    int memory = findResource(lCaseValue, "mb");
+    int vcores = findResource(lCaseValue, "vcores");
+
+    return new ConfigurableResource(
+            BuilderUtils.newResource(memory, vcores));
+  }
+
   private static double[] getResourcePercentage(
       String val) throws AllocationConfigurationException {
     int numberOfKnownResourceTypes = ResourceUtils
         .getNumberOfKnownResourceTypes();
     double[] resourcePercentage = new double[numberOfKnownResourceTypes];
     String[] strings = val.split(",");
+
     if (strings.length == 1) {
       double percentage = findPercentage(strings[0], "");
       for (int i = 0; i < numberOfKnownResourceTypes; i++) {
-        resourcePercentage[i] = percentage/100;
+        resourcePercentage[i] = percentage;
       }
     } else {
-      resourcePercentage[0] = findPercentage(val, "memory")/100;
-      resourcePercentage[1] = findPercentage(val, "cpu")/100;
+      resourcePercentage[0] = findPercentage(val, "memory");
+      resourcePercentage[1] = findPercentage(val, "cpu");
     }
+
     return resourcePercentage;
   }
 
   private static double findPercentage(String val, String units)
-    throws AllocationConfigurationException {
+      throws AllocationConfigurationException {
     final Pattern pattern =
         Pattern.compile("((\\d+)(\\.\\d*)?)\\s*%\\s*" + units);
     Matcher matcher = pattern.matcher(val);
@@ -467,7 +584,22 @@ public class FairSchedulerConfiguration extends Configuration {
             units);
       }
     }
-    return Double.parseDouble(matcher.group(1));
+    return Double.parseDouble(matcher.group(1)) / 100.0;
+  }
+
+  private static AllocationConfigurationException createConfigException(
+          String value, String message) {
+    return createConfigException(value, message, null);
+  }
+
+  private static AllocationConfigurationException createConfigException(
+      String value, String message, Throwable t) {
+    String msg = INVALID_RESOURCE_DEFINITION_PREFIX + value + ". " + message;
+    if (t != null) {
+      return new AllocationConfigurationException(msg, t);
+    } else {
+      return new AllocationConfigurationException(msg);
+    }
   }
 
   public long getUpdateInterval() {

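A short call-site sketch, mirroring the javadoc above and the tests below rather than adding anything new; "cluster" stands for an arbitrary cluster Resource.

    // Old style, absolute values:
    parseResourceConfigValue("2 vcores, 5120 mb").getResource();
    // Old style, percentages; resolved later against the cluster resource:
    parseResourceConfigValue("50% memory, 50% cpu").getResource(cluster);
    // New style, absolute values; unspecified resources default to
    // Long.MAX_VALUE, or to the 'missing' argument (0L is used for minResources):
    parseResourceConfigValue("vcores=2, memory-mb=5120", 0L).getResource();
    // New style, percentages; units are not allowed in this form:
    parseResourceConfigValue("vcores=75%, memory-mb=40%").getResource(cluster);
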
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
index d5a436e..441c34a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
@@ -134,7 +134,7 @@ public class AllocationFileQueueParser {
       if (MIN_RESOURCES.equals(field.getTagName())) {
         String text = getTrimmedTextData(field);
         ConfigurableResource val =
-            FairSchedulerConfiguration.parseResourceConfigValue(text);
+            FairSchedulerConfiguration.parseResourceConfigValue(text, 0L);
         builder.minQueueResources(queueName, val.getResource());
       } else if (MAX_RESOURCES.equals(field.getTagName())) {
         String text = getTrimmedTextData(field);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 481645b..76a5af5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -102,60 +102,145 @@ public class TestFairSchedulerConfiguration {
 
   @Test
   public void testParseResourceConfigValue() throws Exception {
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("2 vcores, 1024 mb").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024 mb, 2 vcores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("2vcores,1024mb").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024mb,2vcores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024   mb, 2    vcores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024 Mb, 2 vCores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("  1024 mb, 2 vcores  ").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("  1024.3 mb, 2.35 vcores  ").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("  1024. mb, 2. vcores  ").getResource());
-
-    Resource clusterResource = BuilderUtils.newResource(2048, 4);
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    Resource expected = BuilderUtils.newResource(5 * 1024, 2);
+    Resource clusterResource = BuilderUtils.newResource(10 * 1024, 4);
+
+    assertEquals(expected,
+        parseResourceConfigValue("2 vcores, 5120 mb").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120 mb, 2 vcores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("2vcores,5120mb").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120mb,2vcores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120mb   mb, 2    vcores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120 Mb, 2 vCores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("  5120 mb, 2 vcores  ").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("  5120.3 mb, 2.35 vcores  ").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("  5120. mb, 2. vcores  ").getResource());
+
+    assertEquals(expected,
         parseResourceConfigValue("50% memory, 50% cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50% Memory, 50% CpU").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("50%").getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 4),
+    assertEquals(BuilderUtils.newResource(5 * 1024, 4),
         parseResourceConfigValue("50% memory, 100% cpu").
         getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 4),
+    assertEquals(BuilderUtils.newResource(5 * 1024, 4),
         parseResourceConfigValue(" 100% cpu, 50% memory").
         getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 0),
+    assertEquals(BuilderUtils.newResource(5 * 1024, 0),
         parseResourceConfigValue("50% memory, 0% cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50 % memory, 50 % cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50%memory,50%cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("  50  %  memory,  50  %  cpu  ").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50.% memory, 50.% cpu").
             getResource(clusterResource));
-
-    clusterResource =  BuilderUtils.newResource(1024 * 10, 4);
     assertEquals(BuilderUtils.newResource((int)(1024 * 10 * 0.109), 2),
         parseResourceConfigValue("10.9% memory, 50.6% cpu").
             getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("50%").getResource(clusterResource));
+
+    Configuration conf = new Configuration();
+
+    conf.set(YarnConfiguration.RESOURCE_TYPES, "test1");
+    ResourceUtils.resetResourceTypes(conf);
+
+    clusterResource = BuilderUtils.newResource(10 * 1024, 4);
+    expected = BuilderUtils.newResource(5 * 1024, 2);
+    expected.setResourceValue("test1", Long.MAX_VALUE);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2, memory-mb=5120").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=5120, vcores=2").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2,memory-mb=5120").getResource());
+    assertEquals(expected, parseResourceConfigValue(" vcores = 2 , "
+            + "memory-mb = 5120 ").getResource());
+
+    expected.setResourceValue("test1", 0L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2, memory-mb=5120", 0L).getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=5120, vcores=2", 0L).getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2,memory-mb=5120", 0L).getResource());
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 2 , memory-mb = 5120 ",
+            0L).getResource());
+
+    clusterResource.setResourceValue("test1", 8L);
+    expected.setResourceValue("test1", 4L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("50%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2, memory-mb=5120, "
+            + "test1=4").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("test1=4, vcores=2, "
+            + "memory-mb=5120").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=5120, test1=4, "
+            + "vcores=2").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2,memory-mb=5120,"
+            + "test1=4").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 2 , memory-mb = 5120 , "
+            + "test1 = 4 ").getResource());
+
+    expected = BuilderUtils.newResource(4 * 1024, 3);
+    expected.setResourceValue("test1", 8L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%, "
+            + "memory-mb=40%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=40%, "
+            + "vcores=75%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%,"
+            + "memory-mb=40%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 75 % , "
+            + "memory-mb = 40 % ").getResource(clusterResource));
+
+    expected.setResourceValue("test1", 4L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%, memory-mb=40%, "
+            + "test1=50%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("test1=50%, vcores=75%, "
+            + "memory-mb=40%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=40%, test1=50%, "
+            + "vcores=75%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%,memory-mb=40%,"
+            + "test1=50%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 75 % , memory-mb = 40 % , "
+            + "test1 = 50 % ").getResource(clusterResource));
   }
   
   @Test(expected = AllocationConfigurationException.class)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index 269f5b4..b5bcbf5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -86,11 +86,11 @@ The allocation file must be in XML format. The format contains five types of ele
 
 * **Queue elements**: which represent queues. Queue elements can take an optional attribute 'type', which when set to 'parent' makes it a parent queue. This is useful when we want to create a parent queue without configuring any leaf queues. Each queue element may contain the following properties:
 
-    * **minResources**: minimum resources the queue is entitled to, in the form "X mb, Y vcores". For the single-resource fairness policy, the vcores value is ignored. If a queue's minimum share is not satisfied, it will be offered available resources before any other queue under the same parent. Under the single-resource fairness policy, a queue is considered unsatisfied if its memory usage is below its minimum memory share. Under dominant resource fairness, a queue is considered unsatisfied if its usage for its dominant resource with respect to the cluster capacity is below its minimum share for that resource. If multiple queues are unsatisfied in this situation, resources go to the queue with the smallest ratio between relevant resource usage and minimum. Note that it is possible that a queue that is below its minimum may not immediately get up to its minimum when it submits an application, because already-running jobs may be using those resources.
+    * **minResources**: minimum resources the queue is entitled to, in the form of "X mb, Y vcores" or "vcores=X, memory-mb=Y". The latter form is required when specifying resources other than memory and CPU. For the single-resource fairness policy, the vcores value is ignored. If a queue's minimum share is not satisfied, it will be offered available resources before any other queue under the same parent. Under the single-resource fairness policy, a queue is considered unsatisfied if its memory usage is below its minimum memory share. Under dominant resource fairness, a queue is considered unsatisfied if its usage for its dominant resource with respect to the cluster capacity is below its minimum share for that resource. If multiple queues are unsatisfied in this situation, resources go to the queue with the smallest ratio between relevant resource usage and its minimum. Note that it is possible for a queue that is below its minimum to not immediately get up to its minimum when an application is submitted to the queue, because already-running jobs may be using those resources.
 
-    * **maxResources**: maximum resources a queue is allocated, expressed either in absolute values (X mb, Y vcores) or as a percentage of the cluster resources (X% memory, Y% cpu). A queue will not be assigned a container that would put its aggregate usage over this limit.
+    * **maxResources**: maximum resources a queue is allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying resources other than memory and CPU. In the last form, X and Y can either be a percentage or an integer resource value without units. In the latter case the units will be inferred from the default units configured for that resource. A queue will not be assigned a container that would put its aggregate usage over this limit.
 
-    * **maxChildResources**: maximum resources an ad hoc child queue is allocated, expressed either in absolute values (X mb, Y vcores) or as a percentage of the cluster resources (X% memory, Y% cpu). An ad hoc child queue will not be assigned a container that would put its aggregate usage over this limit.
+    * **maxChildResources**: maximum resources an ad hoc child queue is allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying resources other than memory and CPU. In the last form, X and Y can either be a percentage or an integer resource value without units. In the latter case the units will be inferred from the default units configured for that resource. An ad hoc child queue will not be assigned a container that would put its aggregate usage over this limit.
 
     * **maxRunningApps**: limit the number of apps from the queue to run at once
 

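To make the new forms concrete, a hedged example of an allocation file entry; the queue name and the "gpu" resource type are made up for illustration.

    <queue name="analytics">
      <!-- the key=value form is required once resources other than memory and CPU are set -->
      <minResources>vcores=4, memory-mb=8192, gpu=1</minResources>
      <maxResources>vcores=50%, memory-mb=40%, gpu=100%</maxResources>
    </queue>
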



[09/50] [abbrv] hadoop git commit: YARN-7451. Add missing tests to verify the presence of custom resources of RM apps and scheduler webservice endpoints (snemeth via rkanter)

Posted by bh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
new file mode 100644
index 0000000..7c5b6db
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
+import org.w3c.dom.Element;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringMatch;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlBoolean;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlFloat;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlInt;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Contains all value verifications that are needed to verify {@link AppInfo}
+ * XML documents.
+ */
+public final class AppInfoXmlVerifications {
+
+  private AppInfoXmlVerifications() {
+    //utility class
+  }
+
+  /**
+   * Tests whether the {@link AppInfo} representation object contains the
+   * required values as defined by the specified app parameter.
+   * @param info the XML element that holds the serialized app info
+   * @param app an RMApp instance that contains the required values
+   */
+  public static void verify(Element info, RMApp app) {
+    checkStringMatch("id", app.getApplicationId()
+            .toString(), getXmlString(info, "id"));
+    checkStringMatch("user", app.getUser(),
+            getXmlString(info, "user"));
+    checkStringMatch("name", app.getName(),
+            getXmlString(info, "name"));
+    checkStringMatch("applicationType",
+            app.getApplicationType(), getXmlString(info, "applicationType"));
+    checkStringMatch("queue", app.getQueue(),
+            getXmlString(info, "queue"));
+    assertEquals("priority doesn't match", 0, getXmlInt(info, "priority"));
+    checkStringMatch("state", app.getState().toString(),
+            getXmlString(info, "state"));
+    checkStringMatch("finalStatus", app
+            .getFinalApplicationStatus().toString(),
+            getXmlString(info, "finalStatus"));
+    assertEquals("progress doesn't match", 0, getXmlFloat(info, "progress"),
+        0.0);
+    if ("UNASSIGNED".equals(getXmlString(info, "trackingUI"))) {
+      checkStringMatch("trackingUI", "UNASSIGNED",
+              getXmlString(info, "trackingUI"));
+    }
+    WebServicesTestUtils.checkStringEqual("diagnostics",
+            app.getDiagnostics().toString(), getXmlString(info, "diagnostics"));
+    assertEquals("clusterId doesn't match",
+            ResourceManager.getClusterTimeStamp(),
+            getXmlLong(info, "clusterId"));
+    assertEquals("startedTime doesn't match", app.getStartTime(),
+            getXmlLong(info, "startedTime"));
+    assertEquals("finishedTime doesn't match", app.getFinishTime(),
+            getXmlLong(info, "finishedTime"));
+    assertTrue("elapsed time not greater than 0",
+            getXmlLong(info, "elapsedTime") > 0);
+    checkStringMatch("amHostHttpAddress", app
+                    .getCurrentAppAttempt().getMasterContainer()
+                    .getNodeHttpAddress(),
+            getXmlString(info, "amHostHttpAddress"));
+    assertTrue("amContainerLogs doesn't match",
+        getXmlString(info, "amContainerLogs").startsWith("http://"));
+    assertTrue("amContainerLogs doesn't contain user info",
+        getXmlString(info, "amContainerLogs").endsWith("/" + app.getUser()));
+    assertEquals("allocatedMB doesn't match", 1024,
+            getXmlInt(info, "allocatedMB"));
+    assertEquals("allocatedVCores doesn't match", 1,
+            getXmlInt(info, "allocatedVCores"));
+    assertEquals("queueUsagePerc doesn't match", 50.0f,
+            getXmlFloat(info, "queueUsagePercentage"), 0.01f);
+    assertEquals("clusterUsagePerc doesn't match", 50.0f,
+            getXmlFloat(info, "clusterUsagePercentage"), 0.01f);
+    assertEquals("numContainers doesn't match", 1,
+        getXmlInt(info, "runningContainers"));
+    assertNotNull("preemptedResourceSecondsMap should not be null",
+            info.getElementsByTagName("preemptedResourceSecondsMap"));
+    assertEquals("preemptedResourceMB doesn't match", app
+                    .getRMAppMetrics().getResourcePreempted().getMemorySize(),
+            getXmlInt(info, "preemptedResourceMB"));
+    assertEquals("preemptedResourceVCores doesn't match", app
+                    .getRMAppMetrics().getResourcePreempted().getVirtualCores(),
+            getXmlInt(info, "preemptedResourceVCores"));
+    assertEquals("numNonAMContainerPreempted doesn't match", app
+                    .getRMAppMetrics().getNumNonAMContainersPreempted(),
+            getXmlInt(info, "numNonAMContainerPreempted"));
+    assertEquals("numAMContainerPreempted doesn't match", app
+                    .getRMAppMetrics().getNumAMContainersPreempted(),
+            getXmlInt(info, "numAMContainerPreempted"));
+    assertEquals("Log aggregation Status doesn't match", app
+                    .getLogAggregationStatusForAppReport().toString(),
+            getXmlString(info, "logAggregationStatus"));
+    assertEquals("unmanagedApplication doesn't match", app
+                    .getApplicationSubmissionContext().getUnmanagedAM(),
+            getXmlBoolean(info, "unmanagedApplication"));
+    assertEquals("unmanagedApplication doesn't match",
+            app.getApplicationSubmissionContext().getNodeLabelExpression(),
+            getXmlString(info, "appNodeLabelExpression"));
+    assertEquals("unmanagedApplication doesn't match",
+            app.getAMResourceRequests().get(0).getNodeLabelExpression(),
+            getXmlString(info, "amNodeLabelExpression"));
+    assertEquals("amRPCAddress",
+            AppInfo.getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()),
+            getXmlString(info, "amRPCAddress"));
+  }
+}

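For orientation, not part of the patch: a caller hands this helper one parsed <app> element from the RM web service XML response together with the matching RMApp; both variable names below are assumptions.

    // 'appElement' is one <app> node parsed from the XML response,
    // 'rmApp' the corresponding application known to the (mock) RM.
    AppInfoXmlVerifications.verify(appElement, rmApp);
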
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
new file mode 100644
index 0000000..a8990ca
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+
+import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.UniformInterfaceException;
+
+import javax.ws.rs.core.MediaType;
+import java.io.IOException;
+
+/**
+ * This class is merely a wrapper for {@link ClientResponse}. Given that the
+ * entity input stream of {@link ClientResponse} can be read only once by
+ * default and for some tests it is convenient to read the input stream many
+ * times, this class hides the details of how to do that and prevents
+ * unnecessary code duplication in tests.
+ */
+public class BufferedClientResponse {
+  private ClientResponse response;
+
+  public BufferedClientResponse(ClientResponse response) {
+    response.bufferEntity();
+    this.response = response;
+  }
+
+  public <T> T getEntity(Class<T> clazz)
+          throws ClientHandlerException, UniformInterfaceException {
+    try {
+      response.getEntityInputStream().reset();
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+    return response.getEntity(clazz);
+  }
+
+  public MediaType getType() {
+    return response.getType();
+  }
+}

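A minimal usage sketch, not part of the patch; the WebResource path is hypothetical, and the point is only that the buffered entity can be read more than once.

    ClientResponse raw = resource.path("ws").path("v1").path("cluster").path("apps")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    BufferedClientResponse response = new BufferedClientResponse(raw);
    String rawBody = response.getEntity(String.class);       // first read, e.g. for logging
    JSONObject json = response.getEntity(JSONObject.class);  // second read, for assertions
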
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
new file mode 100644
index 0000000..9d6a111
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.sun.jersey.api.client.WebResource;
+import org.apache.hadoop.http.JettyUtils;
+import org.codehaus.jettison.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.core.MediaType;
+
+import java.util.function.Consumer;
+
+import static org.junit.Assert.*;
+
+/**
+ * This class hides the implementation details of how to verify the structure of
+ * JSON responses. Tests should only provide the path of the
+ * {@link WebResource}, the response from the resource and
+ * the verifier Consumer to
+ * {@link JsonCustomResourceTypeTestcase#verify(Consumer)}. An instance of
+ * {@link JSONObject} will be passed to that consumer to be able to
+ * verify the response.
+ */
+public class JsonCustomResourceTypeTestcase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JsonCustomResourceTypeTestcase.class);
+
+  private final WebResource path;
+  private final BufferedClientResponse response;
+  private final JSONObject parsedResponse;
+
+  public JsonCustomResourceTypeTestcase(WebResource path,
+                                        BufferedClientResponse response) {
+    this.path = path;
+    this.response = response;
+    this.parsedResponse = response.getEntity(JSONObject.class);
+  }
+
+  public void verify(Consumer<JSONObject> verifier) {
+    assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
+        response.getType().toString());
+
+    logResponse();
+
+    String responseStr = response.getEntity(String.class);
+    if (responseStr == null || responseStr.isEmpty()) {
+      throw new IllegalStateException("Response is null or empty!");
+    }
+    verifier.accept(parsedResponse);
+  }
+
+  private void logResponse() {
+    String responseStr = response.getEntity(String.class);
+    LOG.info("Raw response from service URL {}: {}", path.toString(),
+        responseStr);
+    LOG.info("Parsed response from service URL {}: {}", path.toString(),
+        parsedResponse);
+  }
+}
\ No newline at end of file

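And a rough sketch of how a test might drive the helper, not taken from the patch; the field name checked in the lambda is an assumption.

    JsonCustomResourceTypeTestcase testCase = new JsonCustomResourceTypeTestcase(
        path, new BufferedClientResponse(rawResponse));
    testCase.verify(json -> {
      // 'apps' is only an example field name for this sketch
      assertTrue("response should carry an apps element", json.has("apps"));
    });
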
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
new file mode 100644
index 0000000..6e58a89
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
@@ -0,0 +1,252 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Performs value verifications on
+ * {@link org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceRequestInfo}
+ * objects against the values of {@link ResourceRequest}. With the help of the
+ * {@link Builder}, users can also make verifications of the custom resource
+ * types and their values.
+ */
+public class ResourceRequestsJsonVerifications {
+  private final ResourceRequest resourceRequest;
+  private final JSONObject requestInfo;
+  private final Map<String, Long> customResourceTypes;
+  private final List<String> expectedCustomResourceTypes;
+
+  ResourceRequestsJsonVerifications(Builder builder) {
+    this.resourceRequest = builder.resourceRequest;
+    this.requestInfo = builder.requestInfo;
+    this.customResourceTypes = builder.customResourceTypes;
+    this.expectedCustomResourceTypes = builder.expectedCustomResourceTypes;
+  }
+
+  public static void verify(JSONObject requestInfo, ResourceRequest rr)
+      throws JSONException {
+    createDefaultBuilder(requestInfo, rr).build().verify();
+  }
+
+  public static void verifyWithCustomResourceTypes(JSONObject requestInfo,
+      ResourceRequest resourceRequest, List<String> expectedResourceTypes)
+      throws JSONException {
+
+    createDefaultBuilder(requestInfo, resourceRequest)
+        .withExpectedCustomResourceTypes(expectedResourceTypes)
+        .withCustomResourceTypes(
+            extractActualCustomResourceTypes(requestInfo, expectedResourceTypes))
+        .build().verify();
+  }
+
+  private static Builder createDefaultBuilder(JSONObject requestInfo,
+      ResourceRequest resourceRequest) {
+    return new ResourceRequestsJsonVerifications.Builder()
+            .withRequest(resourceRequest)
+            .withRequestInfoJson(requestInfo);
+  }
+
+  private static Map<String, Long> extractActualCustomResourceTypes(
+      JSONObject requestInfo, List<String> expectedResourceTypes)
+      throws JSONException {
+    JSONObject capability = requestInfo.getJSONObject("capability");
+    Map<String, Long> resourceAndValue =
+        extractCustomResourceTypeValues(capability, expectedResourceTypes);
+    Map.Entry<String, Long> resourceEntry =
+        resourceAndValue.entrySet().iterator().next();
+
+    assertTrue(
+        "Found resource type: " + resourceEntry.getKey()
+            + " is not in expected resource types: " + expectedResourceTypes,
+        expectedResourceTypes.contains(resourceEntry.getKey()));
+
+    return resourceAndValue;
+  }
+
+  private static Map<String, Long> extractCustomResourceTypeValues(
+      JSONObject capability, List<String> expectedResourceTypes)
+      throws JSONException {
+    assertTrue(
+        "resourceCategory does not have resourceInformations: " + capability,
+        capability.has("resourceInformations"));
+
+    JSONObject resourceInformations =
+        capability.getJSONObject("resourceInformations");
+    assertTrue(
+        "resourceInformations does not have resourceInformation object: "
+            + resourceInformations,
+        resourceInformations.has("resourceInformation"));
+    JSONArray customResources =
+        resourceInformations.getJSONArray("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        expectedResourceTypes.size(), customResources.length() - 2);
+
+    Map<String, Long> resourceValues = Maps.newHashMap();
+    for (int i = 0; i < customResources.length(); i++) {
+      JSONObject customResource = customResources.getJSONObject(i);
+      assertTrue("Resource type does not have name field: " + customResource,
+          customResource.has("name"));
+      assertTrue("Resource type does not have name resourceType field: "
+          + customResource, customResource.has("resourceType"));
+      assertTrue(
+          "Resource type does not have name units field: " + customResource,
+          customResource.has("units"));
+      assertTrue(
+          "Resource type does not have name value field: " + customResource,
+          customResource.has("value"));
+
+      String name = customResource.getString("name");
+      String unit = customResource.getString("units");
+      String resourceType = customResource.getString("resourceType");
+      Long value = customResource.getLong("value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          expectedResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Custom resource value " + value + " is null!", value);
+      resourceValues.put(name, value);
+    }
+
+    return resourceValues;
+  }
+
+  private void verify() throws JSONException {
+    assertEquals("nodeLabelExpression doesn't match",
+        resourceRequest.getNodeLabelExpression(),
+        requestInfo.getString("nodeLabelExpression"));
+    assertEquals("numContainers doesn't match",
+        resourceRequest.getNumContainers(),
+        requestInfo.getInt("numContainers"));
+    assertEquals("relaxLocality doesn't match",
+        resourceRequest.getRelaxLocality(),
+        requestInfo.getBoolean("relaxLocality"));
+    assertEquals("priority does not match",
+        resourceRequest.getPriority().getPriority(),
+        requestInfo.getInt("priority"));
+    assertEquals("resourceName does not match",
+        resourceRequest.getResourceName(),
+        requestInfo.getString("resourceName"));
+    assertEquals("memory does not match",
+        resourceRequest.getCapability().getMemorySize(),
+        requestInfo.getJSONObject("capability").getLong("memory"));
+    assertEquals("vCores does not match",
+        resourceRequest.getCapability().getVirtualCores(),
+        requestInfo.getJSONObject("capability").getLong("vCores"));
+
+    verifyAtLeastOneCustomResourceIsSerialized();
+
+    JSONObject executionTypeRequest =
+        requestInfo.getJSONObject("executionTypeRequest");
+    assertEquals("executionType does not match",
+        resourceRequest.getExecutionTypeRequest().getExecutionType().name(),
+        executionTypeRequest.getString("executionType"));
+    assertEquals("enforceExecutionType does not match",
+        resourceRequest.getExecutionTypeRequest().getEnforceExecutionType(),
+        executionTypeRequest.getBoolean("enforceExecutionType"));
+  }
+
+  /**
+   * JSON serialization produces "invalid JSON" by default as maps are
+   * serialized like this:
+   * "customResources":{"entry":{"key":"customResource-1","value":"0"}}
+   * If the map has multiple keys then multiple entries will be serialized.
+   * Our json parser in tests cannot handle duplicates therefore only one
+   * custom resource will be in the parsed json. See:
+   * https://issues.apache.org/jira/browse/YARN-7505
+   */
+  private void verifyAtLeastOneCustomResourceIsSerialized() {
+    boolean resourceFound = false;
+    for (String expectedCustomResourceType : expectedCustomResourceTypes) {
+      if (customResourceTypes.containsKey(expectedCustomResourceType)) {
+        resourceFound = true;
+        Long resourceValue =
+            customResourceTypes.get(expectedCustomResourceType);
+        assertNotNull("Resource value should not be null!", resourceValue);
+      }
+    }
+    assertTrue("No custom resource type can be found in the response!",
+        resourceFound);
+  }
+
+  /**
+   * Builder class for {@link ResourceRequestsJsonVerifications}.
+   */
+  public static final class Builder {
+    private List<String> expectedCustomResourceTypes = Lists.newArrayList();
+    private Map<String, Long> customResourceTypes;
+    private ResourceRequest resourceRequest;
+    private JSONObject requestInfo;
+
+    Builder() {
+    }
+
+    public static Builder create() {
+      return new Builder();
+    }
+
+    Builder withExpectedCustomResourceTypes(
+            List<String> expectedCustomResourceTypes) {
+      this.expectedCustomResourceTypes = expectedCustomResourceTypes;
+      return this;
+    }
+
+    Builder withCustomResourceTypes(
+            Map<String, Long> customResourceTypes) {
+      this.customResourceTypes = customResourceTypes;
+      return this;
+    }
+
+    Builder withRequest(ResourceRequest resourceRequest) {
+      this.resourceRequest = resourceRequest;
+      return this;
+    }
+
+    Builder withRequestInfoJson(JSONObject requestInfo) {
+      this.requestInfo = requestInfo;
+      return this;
+    }
+
+    public ResourceRequestsJsonVerifications build() {
+      return new ResourceRequestsJsonVerifications(this);
+    }
+  }
+}
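
For reference, a minimal sketch of how a test could drive the verifier above. Here rr stands for the ResourceRequest the test submitted, requestInfo for one entry of the parsed "resourceRequests" JSON array, and "customResource-1" is a made-up resource type name:

    // plain verification of the standard fields
    ResourceRequestsJsonVerifications.verify(requestInfo, rr);

    // with custom resource types registered, the extended check also
    // validates the serialized custom resource names and values
    ResourceRequestsJsonVerifications.verifyWithCustomResourceTypes(
        requestInfo, rr, Lists.newArrayList("customResource-1"));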

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
new file mode 100644
index 0000000..af9b0f3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.junit.Assert.assertTrue;
+import static org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase.toXml;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlBoolean;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlInt;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Performs value verifications on
+ * {@link org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceRequestInfo}
+ * objects against the values of {@link ResourceRequest}. With the help of the
+ * {@link Builder}, users can also make verifications of the custom resource
+ * types and their values.
+ */
+public class ResourceRequestsXmlVerifications {
+  private final ResourceRequest resourceRequest;
+  private final Element requestInfo;
+  private final Map<String, Long> customResourceTypes;
+  private final List<String> expectedCustomResourceTypes;
+
+  ResourceRequestsXmlVerifications(Builder builder) {
+    this.resourceRequest = builder.resourceRequest;
+    this.requestInfo = builder.requestInfo;
+    this.customResourceTypes = builder.customResourceTypes;
+    this.expectedCustomResourceTypes = builder.expectedCustomResourceTypes;
+  }
+
+  public static void verifyWithCustomResourceTypes(Element requestInfo,
+      ResourceRequest resourceRequest, List<String> expectedResourceTypes) {
+
+    createDefaultBuilder(requestInfo, resourceRequest)
+        .withExpectedCustomResourceTypes(expectedResourceTypes)
+        .withCustomResourceTypes(extractActualCustomResourceType(requestInfo,
+            expectedResourceTypes))
+        .build().verify();
+  }
+
+  private static Builder createDefaultBuilder(Element requestInfo,
+      ResourceRequest resourceRequest) {
+    return new ResourceRequestsXmlVerifications.Builder()
+        .withRequest(resourceRequest).withRequestInfo(requestInfo);
+  }
+
+  private static Map<String, Long> extractActualCustomResourceType(
+      Element requestInfo, List<String> expectedResourceTypes) {
+    Element capability =
+        (Element) requestInfo.getElementsByTagName("capability").item(0);
+
+    return extractCustomResourceTypes(capability,
+        Sets.newHashSet(expectedResourceTypes));
+  }
+
+  private static Map<String, Long> extractCustomResourceTypes(Element capability,
+      Set<String> expectedResourceTypes) {
+    assertEquals(
+        toXml(capability) + " should have only one resourceInformations child!",
+        1, capability.getElementsByTagName("resourceInformations").getLength());
+    Element resourceInformations = (Element) capability
+        .getElementsByTagName("resourceInformations").item(0);
+
+    NodeList customResources =
+        resourceInformations.getElementsByTagName("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        expectedResourceTypes.size(), customResources.getLength() - 2);
+
+    Map<String, Long> resourceTypesAndValues = Maps.newHashMap();
+    for (int i = 0; i < customResources.getLength(); i++) {
+      Element customResource = (Element) customResources.item(i);
+      String name = getXmlString(customResource, "name");
+      String unit = getXmlString(customResource, "units");
+      String resourceType = getXmlString(customResource, "resourceType");
+      Long value = getXmlLong(customResource, "value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          expectedResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Resource value should not be null for resource type "
+          + resourceType + ", listing xml contents: " + toXml(customResource),
+          value);
+      resourceTypesAndValues.put(name, value);
+    }
+
+    return resourceTypesAndValues;
+  }
+
+  private void verify() {
+    assertEquals("nodeLabelExpression doesn't match",
+        resourceRequest.getNodeLabelExpression(),
+        getXmlString(requestInfo, "nodeLabelExpression"));
+    assertEquals("numContainers doesn't match",
+        resourceRequest.getNumContainers(),
+        getXmlInt(requestInfo, "numContainers"));
+    assertEquals("relaxLocality doesn't match",
+        resourceRequest.getRelaxLocality(),
+        getXmlBoolean(requestInfo, "relaxLocality"));
+    assertEquals("priority does not match",
+        resourceRequest.getPriority().getPriority(),
+        getXmlInt(requestInfo, "priority"));
+    assertEquals("resourceName does not match",
+        resourceRequest.getResourceName(),
+        getXmlString(requestInfo, "resourceName"));
+    Element capability = (Element) requestInfo
+            .getElementsByTagName("capability").item(0);
+    assertEquals("memory does not match",
+        resourceRequest.getCapability().getMemorySize(),
+        getXmlLong(capability, "memory"));
+    assertEquals("vCores does not match",
+        resourceRequest.getCapability().getVirtualCores(),
+        getXmlLong(capability, "vCores"));
+
+    for (String expectedCustomResourceType : expectedCustomResourceTypes) {
+      assertTrue(
+          "Custom resource type " + expectedCustomResourceType
+              + " cannot be found!",
+          customResourceTypes.containsKey(expectedCustomResourceType));
+
+      Long resourceValue = customResourceTypes.get(expectedCustomResourceType);
+      assertNotNull("Resource value should not be null!", resourceValue);
+    }
+
+    Element executionTypeRequest = (Element) requestInfo
+        .getElementsByTagName("executionTypeRequest").item(0);
+    assertEquals("executionType does not match",
+        resourceRequest.getExecutionTypeRequest().getExecutionType().name(),
+        getXmlString(executionTypeRequest, "executionType"));
+    assertEquals("enforceExecutionType does not match",
+        resourceRequest.getExecutionTypeRequest().getEnforceExecutionType(),
+        getXmlBoolean(executionTypeRequest, "enforceExecutionType"));
+  }
+
+  /**
+   * Builder class for {@link ResourceRequestsXmlVerifications}.
+   */
+  public static final class Builder {
+    private List<String> expectedCustomResourceTypes = Lists.newArrayList();
+    private Map<String, Long> customResourceTypes;
+    private ResourceRequest resourceRequest;
+    private Element requestInfo;
+
+    Builder() {
+    }
+
+    public static Builder create() {
+      return new Builder();
+    }
+
+    Builder withExpectedCustomResourceTypes(
+        List<String> expectedCustomResourceTypes) {
+      this.expectedCustomResourceTypes = expectedCustomResourceTypes;
+      return this;
+    }
+
+    Builder withCustomResourceTypes(Map<String, Long> customResourceTypes) {
+      this.customResourceTypes = customResourceTypes;
+      return this;
+    }
+
+    Builder withRequest(ResourceRequest resourceRequest) {
+      this.resourceRequest = resourceRequest;
+      return this;
+    }
+
+    Builder withRequestInfo(Element requestInfo) {
+      this.requestInfo = requestInfo;
+      return this;
+    }
+
+    public ResourceRequestsXmlVerifications build() {
+      return new ResourceRequestsXmlVerifications(this);
+    }
+  }
+}
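
The XML counterpart is driven the same way; a sketch under the assumption that requestInfoElement is the Element of a single resource request extracted from the parsed Document:

    // hypothetical: element obtained via
    // document.getElementsByTagName("resourceRequests")
    ResourceRequestsXmlVerifications.verifyWithCustomResourceTypes(
        requestInfoElement, rr, Lists.newArrayList("customResource-1"));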

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
new file mode 100644
index 0000000..29260aa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.sun.jersey.api.client.WebResource;
+import org.apache.hadoop.http.JettyUtils;
+import org.codehaus.jettison.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Node;
+import org.xml.sax.InputSource;
+
+import javax.ws.rs.core.MediaType;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.transform.*;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.function.Consumer;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This class hides the implementation details of how to verify the structure of
+ * XML responses. Tests should only provide the path of the
+ * {@link WebResource}, the response from the resource and
+ * the verifier Consumer to
+ * {@link XmlCustomResourceTypeTestCase#verify(Consumer)}. An instance of
+ * {@link Document} will be passed to that consumer to be able to
+ * verify the response.
+ */
+public class XmlCustomResourceTypeTestCase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(XmlCustomResourceTypeTestCase.class);
+
+  private WebResource path;
+  private BufferedClientResponse response;
+  private Document parsedResponse;
+
+  public XmlCustomResourceTypeTestCase(WebResource path,
+                                       BufferedClientResponse response) {
+    this.path = path;
+    this.response = response;
+  }
+
+  public void verify(Consumer<Document> verifier) {
+    assertEquals(MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8,
+        response.getType().toString());
+
+    parsedResponse = parseXml(response);
+    logResponse(parsedResponse);
+    verifier.accept(parsedResponse);
+  }
+
+  private Document parseXml(BufferedClientResponse response) {
+    try {
+      String xml = response.getEntity(String.class);
+      DocumentBuilder db =
+          DocumentBuilderFactory.newInstance().newDocumentBuilder();
+      InputSource is = new InputSource();
+      is.setCharacterStream(new StringReader(xml));
+
+      return db.parse(is);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void logResponse(Document doc) {
+    String responseStr = response.getEntity(String.class);
+    LOG.info("Raw response from service URL {}: {}", path.toString(),
+        responseStr);
+    LOG.info("Parsed response from service URL {}: {}", path.toString(),
+        toXml(doc));
+  }
+
+  public static String toXml(Node node) {
+    StringWriter writer;
+    try {
+      TransformerFactory tf = TransformerFactory.newInstance();
+      Transformer transformer = tf.newTransformer();
+      transformer.setOutputProperty(OutputKeys.INDENT, "yes");
+      transformer.setOutputProperty(
+          "{http://xml.apache.org/xslt}indent" + "-amount", "2");
+      writer = new StringWriter();
+      transformer.transform(new DOMSource(node), new StreamResult(writer));
+    } catch (TransformerException e) {
+      throw new RuntimeException(e);
+    }
+
+    return writer.getBuffer().toString();
+  }
+}
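
And a short, assumed usage of the XML test case helper above, mirroring the JSON sketch earlier; the path/response wiring and the assertion inside the consumer are placeholders:

    // hypothetical caller; path and response are set up as in the JSON sketch
    new XmlCustomResourceTypeTestCase(path, response).verify(doc -> {
      // the parsed org.w3c.dom.Document of the response is passed in
      NodeList apps = doc.getElementsByTagName("app");
      assertTrue("expected at least one app element", apps.getLength() > 0);
    });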


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[44/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDDS-48-merge

Posted by bh...@apache.org.
Merge branch 'trunk' into HDDS-48-merge


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f870f0dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f870f0dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f870f0dd

Branch: refs/heads/HDDS-48
Commit: f870f0ddbe33357edb01bfa08d9d27db3fe10604
Parents: 860c588 4a08ddf
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Mon Jul 9 12:53:43 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Mon Jul 9 12:53:43 2018 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/main/conf/hadoop-env.sh   |   6 +-
 .../src/main/conf/hadoop-metrics2.properties    |   2 +-
 .../crypto/key/kms/KMSClientProvider.java       |   4 +-
 .../src/main/conf/kms-log4j.properties          |   4 +-
 .../src/test/resources/log4j.properties         |   4 +-
 hadoop-hdds/framework/pom.xml                   |   5 +
 .../hadoop/hdds/server/events/EventWatcher.java |  43 +++++-
 .../hdds/server/events/EventWatcherMetrics.java |  79 ++++++++++
 .../hdds/server/events/TestEventWatcher.java    | 107 ++++++++++++--
 .../hadoop/yarn/client/AMRMClientUtils.java     |  91 ------------
 .../hadoop/yarn/server/AMRMClientRelayer.java   |   9 +-
 .../yarn/server/uam/UnmanagedAMPoolManager.java |  16 ++
 .../server/uam/UnmanagedApplicationManager.java |  40 ++---
 .../yarn/server/MockResourceManagerFacade.java  |  13 +-
 .../amrmproxy/FederationInterceptor.java        | 146 ++++++++++++++++---
 .../amrmproxy/BaseAMRMProxyTest.java            |   2 +
 .../amrmproxy/TestFederationInterceptor.java    |  17 +++
 17 files changed, 424 insertions(+), 164 deletions(-)
----------------------------------------------------------------------



---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org