Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2018/05/31 15:49:55 UTC

[01/50] [abbrv] hadoop git commit: YARN-8292: Fix the issue where dominant resource preemption cannot happen when some of the resource vector components become negative. Contributed by Wangda Tan. [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/HDDS-4 f25b2357c -> 46edc0d2f (forced update)


YARN-8292: Fix the issue where dominant resource preemption cannot happen when some of the resource vector components become negative. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d5509c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d5509c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d5509c6

Branch: refs/heads/HDDS-4
Commit: 8d5509c68156faaa6641f4e747fc9ff80adccf88
Parents: bddfe79
Author: Eric E Payne <er...@oath.com>
Authored: Fri May 25 16:06:09 2018 +0000
Committer: Eric E Payne <er...@oath.com>
Committed: Fri May 25 16:06:09 2018 +0000

----------------------------------------------------------------------
 .../resource/DefaultResourceCalculator.java     |  15 ++-
 .../resource/DominantResourceCalculator.java    |  39 ++++---
 .../yarn/util/resource/ResourceCalculator.java  |  13 ++-
 .../hadoop/yarn/util/resource/Resources.java    |   5 -
 .../AbstractPreemptableResourceCalculator.java  |  58 ++++++++---
 .../CapacitySchedulerPreemptionUtils.java       |  61 +++++++++--
 .../capacity/FifoCandidatesSelector.java        |   8 +-
 .../FifoIntraQueuePreemptionPlugin.java         |   4 +-
 .../capacity/IntraQueueCandidatesSelector.java  |   2 +-
 .../capacity/PreemptableResourceCalculator.java |   6 +-
 .../monitor/capacity/TempQueuePerPartition.java |   8 +-
 ...alCapacityPreemptionPolicyMockFramework.java |  30 ++++++
 .../TestPreemptionForQueueWithPriorities.java   | 103 ++++++++++++-------
 ...pacityPreemptionPolicyInterQueueWithDRF.java |  60 ++++++++++-
 14 files changed, 312 insertions(+), 100 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index 6375c4a..ab6d7f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -136,13 +136,18 @@ public class DefaultResourceCalculator extends ResourceCalculator {
   }
 
   @Override
-  public boolean isAnyMajorResourceZero(Resource resource) {
-    return resource.getMemorySize() == 0f;
-  }
-
-  @Override
   public Resource normalizeDown(Resource r, Resource stepFactor) {
     return Resources.createResource(
         roundDown((r.getMemorySize()), stepFactor.getMemorySize()));
   }
+
+  @Override
+  public boolean isAnyMajorResourceZeroOrNegative(Resource resource) {
+    return resource.getMemorySize() <= 0;
+  }
+
+  @Override
+  public boolean isAnyMajorResourceAboveZero(Resource resource) {
+    return resource.getMemorySize() > 0;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 6fed23b..2e85ebc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -577,19 +577,6 @@ public class DominantResourceCalculator extends ResourceCalculator {
   }
 
   @Override
-  public boolean isAnyMajorResourceZero(Resource resource) {
-    int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
-    for (int i = 0; i < maxLength; i++) {
-      ResourceInformation resourceInformation = resource
-          .getResourceInformation(i);
-      if (resourceInformation.getValue() == 0L) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  @Override
   public Resource normalizeDown(Resource r, Resource stepFactor) {
     Resource ret = Resource.newInstance(r);
     int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
@@ -613,4 +600,30 @@ public class DominantResourceCalculator extends ResourceCalculator {
     }
     return ret;
   }
+
+  @Override
+  public boolean isAnyMajorResourceZeroOrNegative(Resource resource) {
+    int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+    for (int i = 0; i < maxLength; i++) {
+      ResourceInformation resourceInformation = resource.getResourceInformation(
+          i);
+      if (resourceInformation.getValue() <= 0L) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @Override
+  public boolean isAnyMajorResourceAboveZero(Resource resource) {
+    int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+    for (int i = 0; i < maxLength; i++) {
+      ResourceInformation resourceInformation = resource.getResourceInformation(
+          i);
+      if (resourceInformation.getValue() > 0) {
+        return true;
+      }
+    }
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
index 1c42126..51078cd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
@@ -239,12 +239,12 @@ public abstract class ResourceCalculator {
 
   /**
    * Check if resource has any major resource types (which are all NodeManagers
-   * included) a zero value.
+   * included) with a zero or negative value.
    *
    * @param resource resource
    * @return returns true if any resource is zero.
    */
-  public abstract boolean isAnyMajorResourceZero(Resource resource);
+  public abstract boolean isAnyMajorResourceZeroOrNegative(Resource resource);
 
   /**
    * Get resource <code>r</code>and normalize down using step-factor
@@ -257,4 +257,13 @@ public abstract class ResourceCalculator {
    * @return resulting normalized resource
    */
   public abstract Resource normalizeDown(Resource r, Resource stepFactor);
+
+  /**
+   * Check if any major resource type of the resource (major resource types
+   * are those tracked by all NodeManagers) has a value above zero.
+   *
+   * @param resource resource
+   * @return true if any major resource type is above zero
+   */
+  public abstract boolean isAnyMajorResourceAboveZero(Resource resource);
 }

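A quick way to see what the rename buys: the old isAnyMajorResourceZero() tested each value with == 0L, so a component that had already gone negative slipped past it. A minimal sketch of the two new predicates (illustrative only, assuming the default memory/vcores resource types are registered):

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
    import org.apache.hadoop.yarn.util.resource.ResourceCalculator;

    public class ZeroOrNegativeDemo {
      public static void main(String[] args) {
        // memory=10, vcores=-2: one component went negative during
        // preemption arithmetic.
        Resource r = Resource.newInstance(10, -2);
        ResourceCalculator rc = new DominantResourceCalculator();
        // true: vcores <= 0
        System.out.println(rc.isAnyMajorResourceZeroOrNegative(r));
        // true: memory > 0
        System.out.println(rc.isAnyMajorResourceAboveZero(r));
        // The removed isAnyMajorResourceZero(r) returned false here,
        // because -2 is not exactly zero.
      }
    }
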
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 1c08844..7826f51 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -547,11 +547,6 @@ public class Resources {
     return ret;
   }
 
-  public static boolean isAnyMajorResourceZero(ResourceCalculator rc,
-      Resource resource) {
-    return rc.isAnyMajorResourceZero(resource);
-  }
-
   public static Resource normalizeDown(ResourceCalculator calculator,
       Resource resource, Resource factor) {
     return calculator.normalizeDown(resource, factor);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
index 2589970..64b3615 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
@@ -18,12 +18,6 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.PriorityQueue;
-
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy.PriorityUtilizationQueueOrderingPolicy;
@@ -32,6 +26,12 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.PriorityQueue;
+
 /**
  * Calculate how much resources need to be preempted for each queue,
  * will be used by {@link PreemptionCandidatesSelector}.
@@ -40,7 +40,8 @@ public class AbstractPreemptableResourceCalculator {
 
   protected final CapacitySchedulerPreemptionContext context;
   protected final ResourceCalculator rc;
-  private boolean isReservedPreemptionCandidatesSelector;
+  protected boolean isReservedPreemptionCandidatesSelector;
+  private Resource stepFactor;
 
   static class TQComparator implements Comparator<TempQueuePerPartition> {
     private ResourceCalculator rc;
@@ -90,6 +91,11 @@ public class AbstractPreemptableResourceCalculator {
     rc = preemptionContext.getResourceCalculator();
     this.isReservedPreemptionCandidatesSelector =
         isReservedPreemptionCandidatesSelector;
+
+    stepFactor = Resource.newInstance(0, 0);
+    for (ResourceInformation ri : stepFactor.getResources()) {
+      ri.setValue(1);
+    }
   }
 
   /**
@@ -122,23 +128,24 @@ public class AbstractPreemptableResourceCalculator {
     TQComparator tqComparator = new TQComparator(rc, totGuarant);
     PriorityQueue<TempQueuePerPartition> orderedByNeed = new PriorityQueue<>(10,
         tqComparator);
-    for (Iterator<TempQueuePerPartition> i = qAlloc.iterator(); i.hasNext();) {
+    for (Iterator<TempQueuePerPartition> i = qAlloc.iterator(); i.hasNext(); ) {
       TempQueuePerPartition q = i.next();
       Resource used = q.getUsed();
 
       Resource initIdealAssigned;
       if (Resources.greaterThan(rc, totGuarant, used, q.getGuaranteed())) {
-        initIdealAssigned =
-            Resources.add(q.getGuaranteed(), q.untouchableExtra);
-      } else {
+        initIdealAssigned = Resources.add(
+            Resources.componentwiseMin(q.getGuaranteed(), q.getUsed()),
+            q.untouchableExtra);
+      } else {
         initIdealAssigned = Resources.clone(used);
       }
 
       // perform initial assignment
       initIdealAssignment(totGuarant, q, initIdealAssigned);
 
-
       Resources.subtractFrom(unassigned, q.idealAssigned);
+
       // If idealAssigned < (allocated + used + pending), q needs more
       // resources, so
       // add it to the list of underserved queues, ordered by need.
@@ -152,7 +159,6 @@ public class AbstractPreemptableResourceCalculator {
     // left
     while (!orderedByNeed.isEmpty() && Resources.greaterThan(rc, totGuarant,
         unassigned, Resources.none())) {
-      Resource wQassigned = Resource.newInstance(0, 0);
       // we compute normalizedGuarantees capacity based on currently active
       // queues
       resetCapacity(unassigned, orderedByNeed, ignoreGuarantee);
@@ -166,11 +172,26 @@ public class AbstractPreemptableResourceCalculator {
       Collection<TempQueuePerPartition> underserved = getMostUnderservedQueues(
           orderedByNeed, tqComparator);
 
+      // This value is used in every round to calculate the ideal allocation,
+      // so make a copy to keep it from changing during the calculation.
+      Resource dupUnassignedForTheRound = Resources.clone(unassigned);
+
       for (Iterator<TempQueuePerPartition> i = underserved.iterator(); i
           .hasNext();) {
+        if (!rc.isAnyMajorResourceAboveZero(unassigned)) {
+          break;
+        }
+
         TempQueuePerPartition sub = i.next();
-        Resource wQavail = Resources.multiplyAndNormalizeUp(rc, unassigned,
-            sub.normalizedGuarantee, Resource.newInstance(1, 1));
+
+        // How much resource we offer to the queue (to increase its ideal_alloc).
+        Resource wQavail = Resources.multiplyAndNormalizeUp(rc,
+            dupUnassignedForTheRound,
+            sub.normalizedGuarantee, this.stepFactor);
+
+        // Make sure it is not beyond unassigned
+        wQavail = Resources.componentwiseMin(wQavail, unassigned);
+
         Resource wQidle = sub.offer(wQavail, rc, totGuarant,
             isReservedPreemptionCandidatesSelector);
         Resource wQdone = Resources.subtract(wQavail, wQidle);
@@ -180,9 +201,12 @@ public class AbstractPreemptableResourceCalculator {
           // queue, recalculating its order based on need.
           orderedByNeed.add(sub);
         }
-        Resources.addTo(wQassigned, wQdone);
+
+        Resources.subtractFrom(unassigned, wQdone);
+
+        // Make sure unassigned never goes negative
+        unassigned = Resources.componentwiseMax(unassigned, Resources.none());
       }
-      Resources.subtractFrom(unassigned, wQassigned);
     }
 
     // Sometimes it's possible that all queues are properly served. So intra

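The essence of the computeFixpointAllocation() change above: offers within a round are computed from a snapshot of unassigned taken at the start of the round, each offer is capped componentwise at what is actually still unassigned, and unassigned is floored at zero after every deduction (previously offers were accumulated in wQassigned and subtracted once at the end of the round, which could drive unassigned negative). A self-contained toy over plain long vectors, with hypothetical numbers:

    import java.util.Arrays;

    public class FixpointRoundDemo {
      // Componentwise min/max, standing in for Resources.componentwiseMin/Max.
      static long[] min(long[] a, long[] b) {
        long[] out = new long[a.length];
        for (int i = 0; i < a.length; i++) out[i] = Math.min(a[i], b[i]);
        return out;
      }
      static long[] max(long[] a, long[] b) {
        long[] out = new long[a.length];
        for (int i = 0; i < a.length; i++) out[i] = Math.max(a[i], b[i]);
        return out;
      }

      public static void main(String[] args) {
        long[] unassigned = {10, 4};          // <memory, vcores> left to hand out
        long[] snapshot = unassigned.clone(); // fixed base for this round
        double[] shares = {0.7, 0.6};         // normalized guarantees; sum > 1
        for (double share : shares) {
          long[] offer = new long[snapshot.length];
          for (int i = 0; i < offer.length; i++) {
            // like multiplyAndNormalizeUp with a step factor of 1
            offer[i] = (long) Math.ceil(snapshot[i] * share);
          }
          offer = min(offer, unassigned);     // never offer beyond unassigned
          for (int i = 0; i < offer.length; i++) {
            unassigned[i] -= offer[i];        // assume the queue takes it all
          }
          unassigned = max(unassigned, new long[] {0, 0}); // floor at zero
          System.out.println(Arrays.toString(unassigned)); // [3, 1], then [0, 0]
        }
      }
    }
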
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
index f097e9c..5396d61 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
@@ -132,6 +133,16 @@ public class CapacitySchedulerPreemptionUtils {
    *          map to hold preempted containers
    * @param totalPreemptionAllowed
    *          total preemption allowed per round
+   * @param conservativeDRF
+   *          whether to use conservative DRF preemption.
+   *          When true:
+   *            stop preempting containers once any major resource type of the
+   *            to-preempt resource is <= 0.
+   *            This is the default behavior of intra-queue preemption.
+   *          When false:
+   *            stop preempting containers only when all major resource types
+   *            of the to-preempt resource are <= 0.
+   *            This is the default behavior of inter-queue preemption.
    * @return should we preempt rmContainer. If we should, deduct from
    *         <code>resourceToObtainByPartition</code>
    */
@@ -140,7 +151,7 @@ public class CapacitySchedulerPreemptionUtils {
       Map<String, Resource> resourceToObtainByPartitions,
       RMContainer rmContainer, Resource clusterResource,
       Map<ApplicationAttemptId, Set<RMContainer>> preemptMap,
-      Resource totalPreemptionAllowed) {
+      Resource totalPreemptionAllowed, boolean conservativeDRF) {
     ApplicationAttemptId attemptId = rmContainer.getApplicationAttemptId();
 
     // We will not account resource of a container twice or more
@@ -152,13 +163,49 @@ public class CapacitySchedulerPreemptionUtils {
         rmContainer.getAllocatedNode());
     Resource toObtainByPartition = resourceToObtainByPartitions
         .get(nodePartition);
+    if (null == toObtainByPartition) {
+      return false;
+    }
+
+    // If a to-obtain resource type == 0, set it to -1 so that a zero-valued
+    // resource type does not affect the doPreempt check that follows.
+    for (ResourceInformation ri : toObtainByPartition.getResources()) {
+      if (ri.getValue() == 0) {
+        ri.setValue(-1);
+      }
+    }
+
+    if (rc.isAnyMajorResourceAboveZero(toObtainByPartition) && Resources.fitsIn(
+        rc, rmContainer.getAllocatedResource(), totalPreemptionAllowed)) {
+      boolean doPreempt;
+
+      // How much resource is left after the preemption happens.
+      Resource toObtainAfterPreemption = Resources.subtract(toObtainByPartition,
+          rmContainer.getAllocatedResource());
+
+      if (conservativeDRF) {
+        doPreempt = !rc.isAnyMajorResourceZeroOrNegative(toObtainByPartition);
+      } else {
+        // When we want to do more aggressive preemption, we preempt only if:
+        // - Preempting the container makes a positive contribution to the
+        //   to-obtain resource. A positive contribution means some positive
+        //   resource type decreases.
+        //
+        //   This is an example of a positive contribution:
+        //     * before: <30, 10, 5>, after: <20, 10, -10>
+        //   But this is not a positive contribution:
+        //     * before: <30, 10, 0>, after: <30, 10, -15>
+        doPreempt = Resources.lessThan(rc, clusterResource,
+            Resources
+                .componentwiseMax(toObtainAfterPreemption, Resources.none()),
+            Resources.componentwiseMax(toObtainByPartition, Resources.none()));
+      }
+
+      if (!doPreempt) {
+        return false;
+      }
 
-    if (null != toObtainByPartition
-        && Resources.greaterThan(rc, clusterResource, toObtainByPartition,
-            Resources.none())
-        && Resources.fitsIn(rc, rmContainer.getAllocatedResource(),
-            totalPreemptionAllowed)
-        && !Resources.isAnyMajorResourceZero(rc, toObtainByPartition)) {
       Resources.subtractFrom(toObtainByPartition,
           rmContainer.getAllocatedResource());
       Resources.subtractFrom(totalPreemptionAllowed,

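The non-conservative branch above decides doPreempt by clamping negatives in both vectors to zero and asking whether the clamped to-obtain vector shrank, i.e. whether some still-positive resource type decreased. The real code goes through Resources.componentwiseMax and Resources.lessThan with the resource calculator; below is a toy componentwise version of the same idea, using the two example vectors from the comment:

    public class PositiveContributionDemo {
      // Clamp negative components to zero, like componentwiseMax(v, none()).
      static long[] clamp(long[] v) {
        long[] out = new long[v.length];
        for (int i = 0; i < v.length; i++) out[i] = Math.max(v[i], 0);
        return out;
      }
      // Did any clamped component decrease?
      static boolean shrank(long[] before, long[] after) {
        before = clamp(before);
        after = clamp(after);
        for (int i = 0; i < before.length; i++) {
          if (after[i] < before[i]) return true;
        }
        return false;
      }

      public static void main(String[] args) {
        // Positive contribution: the first component drops 30 -> 20.
        System.out.println(shrank(new long[] {30, 10, 5},
                                  new long[] {20, 10, -10})); // true
        // No positive contribution: only an already-exhausted type goes
        // further negative; the clamped vectors are identical.
        System.out.println(shrank(new long[] {30, 10, 0},
                                  new long[] {30, 10, -15})); // false
      }
    }
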
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
index 748548a..3b2fcbb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
@@ -111,7 +111,7 @@ public class FifoCandidatesSelector
                   .tryPreemptContainerAndDeductResToObtain(rc,
                       preemptionContext, resToObtainByPartition, c,
                       clusterResource, selectedCandidates,
-                      totalPreemptionAllowed);
+                      totalPreemptionAllowed, false);
               if (!preempted) {
                 continue;
               }
@@ -187,7 +187,7 @@ public class FifoCandidatesSelector
       boolean preempted = CapacitySchedulerPreemptionUtils
           .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext,
               resToObtainByPartition, c, clusterResource, preemptMap,
-              totalPreemptionAllowed);
+              totalPreemptionAllowed, false);
       if (preempted) {
         Resources.subtractFrom(skippedAMSize, c.getAllocatedResource());
       }
@@ -221,7 +221,7 @@ public class FifoCandidatesSelector
       // Try to preempt this container
       CapacitySchedulerPreemptionUtils.tryPreemptContainerAndDeductResToObtain(
           rc, preemptionContext, resToObtainByPartition, c, clusterResource,
-          selectedContainers, totalPreemptionAllowed);
+          selectedContainers, totalPreemptionAllowed, false);
 
       if (!preemptionContext.isObserveOnly()) {
         preemptionContext.getRMContext().getDispatcher().getEventHandler()
@@ -264,7 +264,7 @@ public class FifoCandidatesSelector
       // Try to preempt this container
       CapacitySchedulerPreemptionUtils.tryPreemptContainerAndDeductResToObtain(
           rc, preemptionContext, resToObtainByPartition, c, clusterResource,
-          selectedContainers, totalPreemptionAllowed);
+          selectedContainers, totalPreemptionAllowed, false);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
index 1776bd4..40f333f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
@@ -278,8 +278,8 @@ public class FifoIntraQueuePreemptionPlugin
 
       // Once unallocated resource is 0, we can stop assigning ideal per app.
       if (Resources.lessThanOrEqual(rc, clusterResource,
-          queueReassignableResource, Resources.none())
-          || Resources.isAnyMajorResourceZero(rc, queueReassignableResource)) {
+          queueReassignableResource, Resources.none()) || rc
+          .isAnyMajorResourceZeroOrNegative(queueReassignableResource)) {
         continue;
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
index 5b6932e..a91fac7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
@@ -230,7 +230,7 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
       boolean ret = CapacitySchedulerPreemptionUtils
           .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext,
               resToObtainByPartition, c, clusterResource, selectedCandidates,
-              totalPreemptedResourceAllowed);
+              totalPreemptedResourceAllowed, true);
 
       // Subtract from respective user's resource usage once a container is
       // selected for preemption.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
index 676c14f..08d834e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
@@ -41,8 +41,6 @@ public class PreemptableResourceCalculator
   private static final Log LOG =
       LogFactory.getLog(PreemptableResourceCalculator.class);
 
-  private boolean isReservedPreemptionCandidatesSelector;
-
   /**
    * PreemptableResourceCalculator constructor
    *
@@ -95,8 +93,8 @@ public class PreemptableResourceCalculator
     }
 
     // first compute the allocation as a fixpoint based on guaranteed capacity
-    computeFixpointAllocation(tot_guarant, nonZeroGuarQueues, unassigned,
-        false);
+    computeFixpointAllocation(tot_guarant, new HashSet<>(nonZeroGuarQueues),
+        unassigned, false);
 
     // if any capacity is left unassigned, distributed among zero-guarantee
     // queues uniformly (i.e., not based on guaranteed capacity, as this is zero)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
index 9d8297d..4214acc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
@@ -151,7 +151,7 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
     //               # This is for leaf queue only.
     //               max(guaranteed, used) - assigned}
     // remain = avail - accepted
-    Resource accepted = Resources.min(rc, clusterResource,
+    Resource accepted = Resources.componentwiseMin(
         absMaxCapIdealAssignedDelta,
         Resources.min(rc, clusterResource, avail, Resources
             /*
@@ -186,6 +186,12 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
 
     accepted = acceptedByLocality(rc, accepted);
 
+    // accepted should never be < 0
+    accepted = Resources.componentwiseMax(accepted, Resources.none());
+
+    // or more than offered
+    accepted = Resources.componentwiseMin(accepted, avail);
+
     Resource remain = Resources.subtract(avail, accepted);
     Resources.addTo(idealAssigned, accepted);
     return remain;

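The two componentwiseMax/Min lines added to offer() simply pin accepted into the range [0, avail] per resource type. A tiny illustration with made-up numbers:

    public class AcceptedBoundsDemo {
      public static void main(String[] args) {
        long[] accepted = {-4, 12}; // one type negative, one over-offered
        long[] avail    = { 6, 10};
        for (int i = 0; i < accepted.length; i++) {
          accepted[i] = Math.max(accepted[i], 0);        // never < 0
          accepted[i] = Math.min(accepted[i], avail[i]); // never > offered
        }
        // accepted is now {0, 10}
        System.out.println(java.util.Arrays.toString(accepted));
      }
    }
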
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
index a8e2697..a972584 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -59,6 +60,7 @@ import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.mockito.ArgumentMatcher;
@@ -104,10 +106,32 @@ public class ProportionalCapacityPreemptionPolicyMockFramework {
   EventHandler<Event> mDisp = null;
   ProportionalCapacityPreemptionPolicy policy = null;
   Resource clusterResource = null;
+  // Initialize resource map
+  Map<String, ResourceInformation> riMap = new HashMap<>();
+
+  private void resetResourceInformationMap() {
+    // Initialize mandatory resources
+    ResourceInformation memory = ResourceInformation.newInstance(
+        ResourceInformation.MEMORY_MB.getName(),
+        ResourceInformation.MEMORY_MB.getUnits(),
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
+    ResourceInformation vcores = ResourceInformation.newInstance(
+        ResourceInformation.VCORES.getName(),
+        ResourceInformation.VCORES.getUnits(),
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
+    riMap.put(ResourceInformation.MEMORY_URI, memory);
+    riMap.put(ResourceInformation.VCORES_URI, vcores);
+
+    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
+  }
 
   @SuppressWarnings("unchecked")
   @Before
   public void setup() {
+    resetResourceInformationMap();
+
     org.apache.log4j.Logger.getRootLogger().setLevel(
         org.apache.log4j.Level.DEBUG);
 
@@ -142,6 +166,12 @@ public class ProportionalCapacityPreemptionPolicyMockFramework {
     partitionToResource = new HashMap<>();
     nodeIdToSchedulerNodes = new HashMap<>();
     nameToCSQueues = new HashMap<>();
+    clusterResource = Resource.newInstance(0, 0);
+  }
+
+  @After
+  public void cleanup() {
+    resetResourceInformationMap();
   }
 
   public void buildEnv(String labelsConfig, String nodesConfig,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
index e9a8116..6a953cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
@@ -20,44 +20,25 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
 
 import static org.mockito.Matchers.argThat;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class TestPreemptionForQueueWithPriorities
     extends ProportionalCapacityPreemptionPolicyMockFramework {
-  // Initialize resource map
-  private Map<String, ResourceInformation> riMap = new HashMap<>();
-
   @Before
   public void setup() {
-
-    // Initialize mandatory resources
-    ResourceInformation memory = ResourceInformation.newInstance(
-        ResourceInformation.MEMORY_MB.getName(),
-        ResourceInformation.MEMORY_MB.getUnits(),
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
-    ResourceInformation vcores = ResourceInformation.newInstance(
-        ResourceInformation.VCORES.getName(),
-        ResourceInformation.VCORES.getUnits(),
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
-    riMap.put(ResourceInformation.MEMORY_URI, memory);
-    riMap.put(ResourceInformation.VCORES_URI, vcores);
-
-    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
-
+    rc = new DefaultResourceCalculator();
     super.setup();
     policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
   }
@@ -340,8 +321,8 @@ public class TestPreemptionForQueueWithPriorities
      *   - a2 (capacity=60), p=1
      * - b (capacity=30), p=1
      *   - b1 (capacity=50), p=1
-     *   - b1 (capacity=50), p=2
-     * - c (capacity=40), p=2
+     *   - b2 (capacity=50), p=2
+     * - c (capacity=40), p=1
      * </pre>
      */
     String labelsConfig = "=100,true"; // default partition
@@ -349,11 +330,11 @@ public class TestPreemptionForQueueWithPriorities
     String queuesConfig =
         // guaranteed,max,used,pending
         "root(=[100 100 100 100]);" + //root
-            "-a(=[30 100 40 50]){priority=1};" + // a
+            "-a(=[29 100 40 50]){priority=1};" + // a
             "--a1(=[12 100 20 50]){priority=1};" + // a1
-            "--a2(=[18 100 20 50]){priority=1};" + // a2
-            "-b(=[30 100 59 50]){priority=1};" + // b
-            "--b1(=[15 100 30 50]){priority=1};" + // b1
+            "--a2(=[17 100 20 50]){priority=1};" + // a2
+            "-b(=[31 100 59 50]){priority=1};" + // b
+            "--b1(=[16 100 30 50]){priority=1};" + // b1
             "--b2(=[15 100 29 50]){priority=2};" + // b2
             "-c(=[40 100 1 30]){priority=1}";   // c
     String appsConfig =
@@ -362,7 +343,7 @@ public class TestPreemptionForQueueWithPriorities
             "a2\t(1,1,n1,,20,false);" + // app2 in a2
             "b1\t(1,1,n1,,30,false);" + // app3 in b1
             "b2\t(1,1,n1,,29,false);" + // app4 in b2
-            "c\t(1,1,n1,,29,false)"; // app5 in c
+            "c\t(1,1,n1,,1,false)"; // app5 in c
 
 
     buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
@@ -370,16 +351,16 @@ public class TestPreemptionForQueueWithPriorities
 
     // Preemption should first divide capacities between a / b, and b2 should
     // get less preemption than b1 (because b2 has higher priority)
-    verify(mDisp, times(5)).handle(argThat(
+    verify(mDisp, times(6)).handle(argThat(
         new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
             getAppAttemptId(1))));
-    verify(mDisp, never()).handle(argThat(
+    verify(mDisp, times(1)).handle(argThat(
         new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
             getAppAttemptId(2))));
-    verify(mDisp, times(15)).handle(argThat(
+    verify(mDisp, times(13)).handle(argThat(
         new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
             getAppAttemptId(3))));
-    verify(mDisp, times(9)).handle(argThat(
+    verify(mDisp, times(10)).handle(argThat(
         new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
             getAppAttemptId(4))));
   }
@@ -426,7 +407,7 @@ public class TestPreemptionForQueueWithPriorities
 
     // Preemption should first divide capacities between a / b, and b1 should
     // get less preemption than b2 (because b1 has higher priority)
-    verify(mDisp, never()).handle(argThat(
+    verify(mDisp, times(3)).handle(argThat(
         new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
             getAppAttemptId(1))));
     verify(mDisp, never()).handle(argThat(
@@ -505,4 +486,56 @@ public class TestPreemptionForQueueWithPriorities
             getAppAttemptId(3))));
   }
 
+  @Test
+  public void test3ResourceTypesInterQueuePreemption() throws IOException {
+    rc = new DominantResourceCalculator();
+    when(cs.getResourceCalculator()).thenReturn(rc);
+
+    // Initialize resource map
+    String RESOURCE_1 = "res1";
+    riMap.put(RESOURCE_1, ResourceInformation.newInstance(RESOURCE_1, "", 0,
+        ResourceTypes.COUNTABLE, 0, Integer.MAX_VALUE));
+
+    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
+
+    /**
+     * Queue structure is:
+     *
+     * <pre>
+     *              root
+     *           /  \  \
+     *          a    b  c
+     * </pre>
+     *  A / B / C have 33.3 / 33.3 / 33.4 resources
+     *  Total cluster resource is mem=30, cpu=18, GPU=6
+     *  A uses mem=6, cpu=3, GPU=3
+     *  B uses mem=6, cpu=3, GPU=3
+     *  C is asking for mem=1, cpu=1, GPU=1
+     *
+     *  We expect preemption from one of the jobs
+     */
+    String labelsConfig =
+        "=30:18:6,true;";
+    String nodesConfig =
+        "n1= res=30:18:6;"; // n1 is default partition
+    String queuesConfig =
+        // guaranteed,max,used,pending
+        "root(=[30:18:6 30:18:6 12:12:6 1:1:1]){priority=1};" + //root
+            "-a(=[10:6:2 10:6:2 6:6:3 0:0:0]){priority=1};" + // a
+            "-b(=[10:6:2 10:6:2 6:6:3 0:0:0]){priority=1};" + // b
+            "-c(=[10:6:2 10:6:2 0:0:0 1:1:1]){priority=2}"; // c
+    String appsConfig=
+        //queueName\t(priority,resource,host,expression,#repeat,reserved)
+        "a\t" // app1 in a1
+            + "(1,2:2:1,n1,,3,false);" +
+            "b\t" // app2 in b2
+            + "(1,2:2:1,n1,,3,false)";
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    verify(mDisp, times(1)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
index c8a1f0f..14a3a9a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
@@ -18,11 +18,16 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.junit.Before;
 import org.junit.Test;
 
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import java.io.IOException;
+
 import static org.mockito.Matchers.argThat;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
@@ -41,8 +46,7 @@ public class TestProportionalCapacityPreemptionPolicyInterQueueWithDRF
   }
 
   @Test
-  public void testInterQueuePreemptionWithMultipleResource()
-      throws Exception {
+  public void testInterQueuePreemptionWithMultipleResource() throws Exception {
     /**
      * Queue structure is:
      *
@@ -121,4 +125,52 @@ public class TestProportionalCapacityPreemptionPolicyInterQueueWithDRF
         new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
             getAppAttemptId(1))));
   }
-}
+
+  @Test
+  public void test3ResourceTypesInterQueuePreemption() throws IOException {
+    // Initialize resource map
+    String RESOURCE_1 = "res1";
+    riMap.put(RESOURCE_1, ResourceInformation
+        .newInstance(RESOURCE_1, "", 0, ResourceTypes.COUNTABLE, 0,
+            Integer.MAX_VALUE));
+
+    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
+
+    /*
+     *              root
+     *           /  \  \
+     *          a    b  c
+     *
+     *  A / B / C have 33.3 / 33.3 / 33.4 resources
+     *  Total cluster resource is mem=30, cpu=18, GPU=6
+     *  A uses mem=6, cpu=3, GPU=3
+     *  B uses mem=6, cpu=3, GPU=3
+     *  C is asking for mem=1, cpu=1, GPU=1
+     *
+     *  We expect preemption from one of the jobs
+     */
+    String labelsConfig = "=30:18:6,true;";
+    String nodesConfig = "n1= res=30:18:6;"; // n1 is default partition
+    String queuesConfig =
+        // guaranteed,max,used,pending
+        "root(=[30:18:6 30:18:6 12:12:6 1:1:1]);" + //root
+            "-a(=[10:7:2 10:6:3 6:6:3 0:0:0]);" + // a
+            "-b(=[10:6:2 10:6:3 6:6:3 0:0:0]);" + // b
+            "-c(=[10:5:2 10:6:2 0:0:0 1:1:1])"; // c
+    String appsConfig =
+        //queueName\t(priority,resource,host,expression,#repeat,reserved)
+        "a\t" // app1 in a1
+            + "(1,2:2:1,n1,,3,false);" + "b\t" // app2 in b2
+            + "(1,2:2:1,n1,,3,false)";
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    verify(mDisp, times(0)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+    verify(mDisp, times(1)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(2))));
+  }
+}
\ No newline at end of file




[34/50] [abbrv] hadoop git commit: HDDS-88. Create separate message structure to represent ports in DatanodeDetails. Contributed by Nanda Kumar.

Posted by xy...@apache.org.
HDDS-88. Create separate message structure to represent ports in DatanodeDetails.
Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b34148c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b34148c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b34148c

Branch: refs/heads/HDDS-4
Commit: 3b34148c4f7380d201de59c4a1870b597649248f
Parents: b24098b
Author: Anu Engineer <ae...@apache.org>
Authored: Wed May 30 08:52:07 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed May 30 08:52:07 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdds/scm/XceiverClient.java   |   2 +-
 .../hadoop/hdds/scm/XceiverClientGrpc.java      |   2 +-
 .../hadoop/hdds/protocol/DatanodeDetails.java   | 219 +++++++++++--------
 .../main/java/org/apache/ratis/RatisHelper.java |   6 +-
 hadoop-hdds/common/src/main/proto/hdds.proto    |  10 +-
 .../common/transport/server/XceiverServer.java  |   3 +-
 .../transport/server/XceiverServerGrpc.java     |   3 +-
 .../server/ratis/XceiverServerRatis.java        |   3 +-
 .../common/TestDatanodeStateMachine.java        |  16 +-
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  12 +-
 .../hdds/scm/block/TestDeletedBlockLog.java     |  18 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |  17 +-
 .../apache/hadoop/ozone/RatisTestHelper.java    |   3 +-
 .../hadoop/ozone/TestMiniOzoneCluster.java      |   8 +-
 .../ozone/container/ContainerTestHelper.java    |  12 +-
 .../container/metrics/TestContainerMetrics.java |   3 +-
 .../container/ozoneimpl/TestOzoneContainer.java |  11 +-
 .../container/server/TestContainerServer.java   |   8 +-
 .../ksm/TestKeySpaceManagerRestInterface.java   |   5 +-
 .../ozone/web/TestDistributedOzoneVolumes.java  |   4 +-
 .../hadoop/ozone/web/TestLocalOzoneVolumes.java |   4 +-
 .../hadoop/ozone/web/TestOzoneWebAccess.java    |   4 +-
 .../ozone/web/client/TestOzoneClient.java       |   4 +-
 .../hadoop/ozone/web/client/TestVolume.java     |   2 -
 .../ozone/web/client/TestVolumeRatis.java       |   4 +-
 .../ozone/web/OzoneHddsDatanodeService.java     |   5 +-
 .../hadoop/ozone/ksm/KeySpaceManager.java       |   4 +-
 .../hadoop/ozone/genesis/GenesisUtil.java       |  12 +-
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |   6 +-
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java |   3 +-
 30 files changed, 260 insertions(+), 153 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
index 42e02f9..709f0dc 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
@@ -93,7 +93,7 @@ public class XceiverClient extends XceiverClientSpi {
 
     // read port from the data node, on failure use default configured
     // port.
-    int port = leader.getContainerPort();
+    int port = leader.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
     if (port == 0) {
       port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
           OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 84790e8..c787024 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -80,7 +80,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 
     // read port from the data node, on failure use default configured
     // port.
-    int port = leader.getContainerPort();
+    int port = leader.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
     if (port == 0) {
       port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
           OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);

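Both client hunks above switch from the removed getContainerPort() getter to a lookup by port name. For orientation, a rough sketch of building and querying a DatanodeDetails under the new API; addPort, newPort, getPort, and newBuilder appear in the DatanodeDetails diff below, while the uuid/ip/host setter names and the concrete values are assumptions for illustration:

    import java.util.UUID;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;

    public class PortLookupDemo {
      public static void main(String[] args) {
        DatanodeDetails dn = DatanodeDetails.newBuilder()
            .setUuid(UUID.randomUUID().toString())
            .setIpAddress("10.0.0.1")        // hypothetical address
            .setHostName("dn1.example.com")  // hypothetical hostname
            .addPort(DatanodeDetails.newPort(
                DatanodeDetails.Port.Name.STANDALONE, 9859)) // hypothetical port
            .build();

        // Name-keyed lookup replaces the old per-port getters;
        // getPort() returns null if no port with that name was added.
        int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
        System.out.println(port);
      }
    }
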
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index b2fa291..c373e22 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.UUID;
 
 /**
@@ -42,9 +44,7 @@ public final class DatanodeDetails implements Comparable<DatanodeDetails> {
 
   private String ipAddress;
   private String hostName;
-  private Integer containerPort;
-  private Integer ratisPort;
-  private Integer ozoneRestPort;
+  private List<Port> ports;
 
 
   /**
@@ -53,18 +53,14 @@ public final class DatanodeDetails implements Comparable<DatanodeDetails> {
    * @param uuid DataNode's UUID
    * @param ipAddress IP Address of this DataNode
    * @param hostName DataNode's hostname
-   * @param containerPort Container Port
-   * @param ratisPort Ratis Port
-   * @param ozoneRestPort Rest Port
+   * @param ports Ports used by the DataNode
    */
   private DatanodeDetails(String uuid, String ipAddress, String hostName,
-      Integer containerPort, Integer ratisPort, Integer ozoneRestPort) {
+      List<Port> ports) {
     this.uuid = UUID.fromString(uuid);
     this.ipAddress = ipAddress;
     this.hostName = hostName;
-    this.containerPort = containerPort;
-    this.ratisPort = ratisPort;
-    this.ozoneRestPort = ozoneRestPort;
+    this.ports = ports;
   }
 
   /**
@@ -122,54 +118,40 @@ public final class DatanodeDetails implements Comparable<DatanodeDetails> {
   }
 
   /**
-   * Sets the Container Port.
-   * @param port ContainerPort
-   */
-  public void setContainerPort(int port) {
-    containerPort = port;
-  }
-
-  /**
-   * Returns standalone container Port.
+   * Sets a DataNode Port.
    *
-   * @return Container Port
+   * @param port DataNode port
    */
-  public int getContainerPort() {
-    return containerPort;
+  public void setPort(Port port) {
+    // If the port is already in the list, remove it first and add the
+    // new/updated port value.
+    ports.remove(port);
+    ports.add(port);
   }
 
   /**
-   * Sets Ratis Port.
-   * @param port RatisPort
-   */
-  public void setRatisPort(int port) {
-    ratisPort = port;
-  }
-
-
-  /**
-   * Returns Ratis Port.
-   * @return Ratis Port
-   */
-  public int getRatisPort() {
-    return ratisPort;
-  }
-
-
-  /**
-   * Sets OzoneRestPort.
-   * @param port OzoneRestPort
+   * Returns all the ports used by the DataNode.
+   *
+   * @return DataNode Ports
    */
-  public void setOzoneRestPort(int port) {
-    ozoneRestPort = port;
+  public List<Port> getPorts() {
+    return ports;
   }
 
   /**
-   * Returns Ozone Rest Port.
-   * @return OzoneRestPort
+   * Returns the port with the given name, or null if no such port exists.
+   *
+   * @param name Name of the port
+   *
+   * @return matching Port, or null if no port is registered under that name
    */
-  public int getOzoneRestPort() {
-    return ozoneRestPort;
+  public Port getPort(Port.Name name) {
+    for (Port port : ports) {
+      if (port.getName().equals(name)) {
+        return port;
+      }
+    }
+    return null;
   }
 
   /**
@@ -188,14 +170,9 @@ public final class DatanodeDetails implements Comparable<DatanodeDetails> {
     if (datanodeDetailsProto.hasHostName()) {
       builder.setHostName(datanodeDetailsProto.getHostName());
     }
-    if (datanodeDetailsProto.hasContainerPort()) {
-      builder.setContainerPort(datanodeDetailsProto.getContainerPort());
-    }
-    if (datanodeDetailsProto.hasRatisPort()) {
-      builder.setRatisPort(datanodeDetailsProto.getRatisPort());
-    }
-    if (datanodeDetailsProto.hasOzoneRestPort()) {
-      builder.setOzoneRestPort(datanodeDetailsProto.getOzoneRestPort());
+    for (HddsProtos.Port port : datanodeDetailsProto.getPortsList()) {
+      builder.addPort(newPort(
+          Port.Name.valueOf(port.getName().toUpperCase()), port.getValue()));
     }
     return builder.build();
   }
@@ -214,14 +191,11 @@ public final class DatanodeDetails implements Comparable<DatanodeDetails> {
     if (hostName != null) {
       builder.setHostName(hostName);
     }
-    if (containerPort != null) {
-      builder.setContainerPort(containerPort);
-    }
-    if (ratisPort != null) {
-      builder.setRatisPort(ratisPort);
-    }
-    if (ozoneRestPort != null) {
-      builder.setOzoneRestPort(ozoneRestPort);
+    for (Port port : ports) {
+      builder.addPorts(HddsProtos.Port.newBuilder()
+          .setName(port.getName().toString())
+          .setValue(port.getValue())
+          .build());
     }
     return builder.build();
   }
@@ -268,9 +242,15 @@ public final class DatanodeDetails implements Comparable<DatanodeDetails> {
     private String id;
     private String ipAddress;
     private String hostName;
-    private Integer containerPort;
-    private Integer ratisPort;
-    private Integer ozoneRestPort;
+    private List<Port> ports;
+
+    /**
+     * Default private constructor. To create Builder instance use
+     * DatanodeDetails#newBuilder.
+     */
+    private Builder() {
+      ports = new ArrayList<>();
+    }
 
     /**
      * Sets the DatanodeUuid.
@@ -304,50 +284,111 @@ public final class DatanodeDetails implements Comparable<DatanodeDetails> {
       this.hostName = host;
       return this;
     }
+
     /**
-     * Sets the ContainerPort.
+     * Adds a DataNode Port.
+     *
+     * @param port DataNode port
      *
-     * @param port ContainerPort
      * @return DatanodeDetails.Builder
      */
-    public Builder setContainerPort(Integer port) {
-      this.containerPort = port;
+    public Builder addPort(Port port) {
+      this.ports.add(port);
       return this;
     }
 
     /**
-     * Sets the RatisPort.
+     * Builds and returns DatanodeDetails instance.
      *
-     * @param port RatisPort
-     * @return DatanodeDetails.Builder
+     * @return DatanodeDetails
      */
-    public Builder setRatisPort(Integer port) {
-      this.ratisPort = port;
-      return this;
+    public DatanodeDetails build() {
+      Preconditions.checkNotNull(id);
+      return new DatanodeDetails(id, ipAddress, hostName, ports);
     }
 
+  }
+
+  /**
+   * Constructs a new Port with name and value.
+   *
+   * @param name Name of the port
+   * @param value Port number
+   *
+   * @return {@code Port} instance
+   */
+  public static Port newPort(Port.Name name, Integer value) {
+    return new Port(name, value);
+  }
+
+  /**
+   * Container to hold DataNode Port details.
+   */
+  public static class Port {
+
+    /**
+     * Ports that are supported in DataNode.
+     */
+    public enum Name {
+      STANDALONE, RATIS, REST
+    }
+
+    private Name name;
+    private Integer value;
+
     /**
-     * Sets the OzoneRestPort.
+     * Private constructor for constructing Port object. Use
+     * DatanodeDetails#newPort to create a new Port object.
      *
-     * @param port OzoneRestPort
-     * @return DatanodeDetails.Builder
+     * @param name Name of the port
+     * @param value Port number
      */
-    public Builder setOzoneRestPort(Integer port) {
-      this.ozoneRestPort = port;
-      return this;
+    private Port(Name name, Integer value) {
+      this.name = name;
+      this.value = value;
     }
 
     /**
-     * Builds and returns DatanodeDetails instance.
+     * Returns the name of the port.
      *
-     * @return DatanodeDetails
+     * @return Port name
      */
-    public DatanodeDetails build() {
-      Preconditions.checkNotNull(id);
-      return new DatanodeDetails(id, ipAddress, hostName, containerPort,
-          ratisPort, ozoneRestPort);
+    public Name getName() {
+      return name;
+    }
+
+    /**
+     * Returns the port number.
+     *
+     * @return Port number
+     */
+    public Integer getValue() {
+      return value;
+    }
+
+    @Override
+    public int hashCode() {
+      return name.hashCode();
     }
 
+    /**
+     * Ports are considered equal if they have the same name.
+     *
+     * @param anObject
+     *          The object to compare this {@code Port} against
+     * @return {@code true} if the given object represents a {@code Port}
+     *         and has the same name, {@code false} otherwise
+     */
+    @Override
+    public boolean equals(Object anObject) {
+      if (this == anObject) {
+        return true;
+      }
+      if (anObject instanceof Port) {
+        return name.equals(((Port) anObject).name);
+      }
+      return false;
+    }
   }
 
 }
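
Taken together, the Port-based API above is used roughly as follows. This is a minimal sketch assembled from the builder and accessor methods in this diff; the UUID, addresses, and port numbers are placeholders:

    // Build a DatanodeDetails with named ports (placeholder values).
    DatanodeDetails dn = DatanodeDetails.newBuilder()
        .setUuid(UUID.randomUUID().toString())
        .setIpAddress("127.0.0.1")
        .setHostName("localhost")
        .addPort(DatanodeDetails.newPort(
            DatanodeDetails.Port.Name.STANDALONE, 9859))
        .addPort(DatanodeDetails.newPort(
            DatanodeDetails.Port.Name.RATIS, 9858))
        .build();

    // Look up a port by name; getPort returns null for unknown names.
    int ratisPort = dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue();

    // setPort replaces any port with the same name, since Port#equals
    // and Port#hashCode consider only the name.
    dn.setPort(DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS, 9999));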

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
index 3a55831..20356b3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
@@ -48,11 +48,13 @@ public interface RatisHelper {
   Logger LOG = LoggerFactory.getLogger(RatisHelper.class);
 
   static String toRaftPeerIdString(DatanodeDetails id) {
-    return id.getUuidString() + "_" + id.getRatisPort();
+    return id.getUuidString() + "_" +
+        id.getPort(DatanodeDetails.Port.Name.RATIS).getValue();
   }
 
   static String toRaftPeerAddressString(DatanodeDetails id) {
-    return id.getIpAddress() + ":" + id.getRatisPort();
+    return id.getIpAddress() + ":" +
+        id.getPort(DatanodeDetails.Port.Name.RATIS).getValue();
   }
 
   static RaftPeerId toRaftPeerId(DatanodeDetails id) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-hdds/common/src/main/proto/hdds.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto
index 6ea5727..f834c73 100644
--- a/hadoop-hdds/common/src/main/proto/hdds.proto
+++ b/hadoop-hdds/common/src/main/proto/hdds.proto
@@ -29,13 +29,15 @@ option java_generate_equals_and_hash = true;
 package hadoop.hdds;
 
 message DatanodeDetailsProto {
-    // TODO: make the port as a seperate proto message and use it here
     required string uuid = 1;  // UUID assigned to the Datanode.
     required string ipAddress = 2;     // IP address
     required string hostName = 3;      // hostname
-    optional uint32 containerPort = 4 [default = 0];  // Ozone stand_alone protocol
-    optional uint32 ratisPort = 5 [default = 0];      //Ozone ratis port
-    optional uint32 ozoneRestPort = 6 [default = 0];
+    repeated Port ports = 4;
+}
+
+message Port {
+    required string name = 1;
+    required uint32 value = 2;
 }
 
 message PipelineChannel {
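
On the wire, each named port now travels as its own hadoop.hdds.Port message. A sketch of the round trip built from the conversion code in the DatanodeDetails diff above; the field values are placeholders:

    // Serialize: one Port message per named port.
    HddsProtos.DatanodeDetailsProto proto =
        HddsProtos.DatanodeDetailsProto.newBuilder()
            .setUuid(UUID.randomUUID().toString())
            .setIpAddress("127.0.0.1")
            .setHostName("localhost")
            .addPorts(HddsProtos.Port.newBuilder()
                .setName(DatanodeDetails.Port.Name.RATIS.toString())
                .setValue(9858)
                .build())
            .build();

    // Deserialize: the name string is mapped back onto Port.Name via
    // Name.valueOf(name.toUpperCase()), as in getFromProtoBuf above.
    DatanodeDetails dn = DatanodeDetails.getFromProtoBuf(proto);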

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
index 7105fd7..455df49 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
@@ -80,7 +80,8 @@ public final class XceiverServer implements XceiverServerSpi {
             + "fallback to use default port {}", this.port, e);
       }
     }
-    datanodeDetails.setContainerPort(port);
+    datanodeDetails.setPort(
+        DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, port));
     this.storageContainer = dispatcher;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index 30a2f87..550fe41 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -71,7 +71,8 @@ public final class XceiverServerGrpc implements XceiverServerSpi {
             + "fallback to use default port {}", this.port, e);
       }
     }
-    datanodeDetails.setContainerPort(port);
+    datanodeDetails.setPort(
+        DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, port));
     server = ((NettyServerBuilder) ServerBuilder.forPort(port))
         .maxMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
         .addService(new GrpcXceiverService(dispatcher))

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 46def09..33c25ea 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -203,7 +203,8 @@ public final class XceiverServerRatis implements XceiverServerSpi {
             + "fallback to use default port {}", localPort, e);
       }
     }
-    datanodeDetails.setRatisPort(localPort);
+    datanodeDetails.setPort(
+        DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS, localPort));
     return new XceiverServerRatis(datanodeDetails, localPort, storageDir,
         dispatcher, ozoneConf);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index ee82c57..ece7545 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -209,8 +209,10 @@ public class TestDatanodeStateMachine {
         conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID));
     idPath.delete();
     DatanodeDetails datanodeDetails = getNewDatanodeDetails();
-    datanodeDetails.setContainerPort(
+    DatanodeDetails.Port port = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.STANDALONE,
         OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
+    datanodeDetails.setPort(port);
     ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath);
 
     try (DatanodeStateMachine stateMachine =
@@ -360,13 +362,19 @@ public class TestDatanodeStateMachine {
   }
 
   private DatanodeDetails getNewDatanodeDetails() {
+    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.STANDALONE, 0);
+    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.RATIS, 0);
+    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.REST, 0);
     return DatanodeDetails.newBuilder()
         .setUuid(UUID.randomUUID().toString())
         .setHostName("localhost")
         .setIpAddress("127.0.0.1")
-        .setContainerPort(0)
-        .setRatisPort(0)
-        .setOzoneRestPort(0)
+        .addPort(containerPort)
+        .addPort(ratisPort)
+        .addPort(restPort)
         .build();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index b8036d7..7568bf3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -124,13 +124,19 @@ public final class TestUtils {
             .nextInt(256) + "." + random.nextInt(256);
 
     String hostName = uuid;
+    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.STANDALONE, 0);
+    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.RATIS, 0);
+    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.REST, 0);
     DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
     builder.setUuid(uuid)
         .setHostName("localhost")
         .setIpAddress(ipAddress)
-        .setContainerPort(0)
-        .setRatisPort(0)
-        .setOzoneRestPort(0);
+        .addPort(containerPort)
+        .addPort(ratisPort)
+        .addPort(restPort);
     return builder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index 8c12806..adb212a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -265,21 +265,27 @@ public class TestDeletedBlockLog {
 
     int count = 0;
     long containerID = 0L;
+    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.STANDALONE, 0);
+    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.RATIS, 0);
+    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.REST, 0);
     DatanodeDetails dnId1 = DatanodeDetails.newBuilder()
         .setUuid(UUID.randomUUID().toString())
         .setIpAddress("127.0.0.1")
         .setHostName("localhost")
-        .setContainerPort(0)
-        .setRatisPort(0)
-        .setOzoneRestPort(0)
+        .addPort(containerPort)
+        .addPort(ratisPort)
+        .addPort(restPort)
         .build();
     DatanodeDetails dnId2 = DatanodeDetails.newBuilder()
         .setUuid(UUID.randomUUID().toString())
         .setIpAddress("127.0.0.1")
         .setHostName("localhost")
-        .setContainerPort(0)
-        .setRatisPort(0)
-        .setOzoneRestPort(0)
+        .addPort(containerPort)
+        .addPort(ratisPort)
+        .addPort(restPort)
         .build();
     Mapping mappingService = mock(ContainerMapping.class);
     // Creates {TXNum} TX in the log.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index ad8b016..f0bfef1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -24,6 +24,7 @@ import java.util.Optional;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ipc.Client;
@@ -219,13 +220,15 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
     datanodeService.stop();
     datanodeService.join();
     // ensure same ports are used across restarts.
-    Configuration config = datanodeService.getConf();
-    int currentPort = datanodeService.getDatanodeDetails().getContainerPort();
-    config.setInt(DFS_CONTAINER_IPC_PORT, currentPort);
-    config.setBoolean(DFS_CONTAINER_IPC_RANDOM_PORT, false);
-    int ratisPort = datanodeService.getDatanodeDetails().getRatisPort();
-    config.setInt(DFS_CONTAINER_RATIS_IPC_PORT, ratisPort);
-    config.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, false);
+    Configuration conf = datanodeService.getConf();
+    int currentPort = datanodeService.getDatanodeDetails()
+        .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
+    conf.setInt(DFS_CONTAINER_IPC_PORT, currentPort);
+    conf.setBoolean(DFS_CONTAINER_IPC_RANDOM_PORT, false);
+    int ratisPort = datanodeService.getDatanodeDetails()
+        .getPort(DatanodeDetails.Port.Name.RATIS).getValue();
+    conf.setInt(DFS_CONTAINER_RATIS_IPC_PORT, ratisPort);
+    conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, false);
     datanodeService.start(null);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
index 1a35c50..fce9e77 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.ratis.rpc.RpcType;
@@ -78,7 +79,7 @@ public interface RatisTestHelper {
 
     public int getDatanodeOzoneRestPort() {
       return cluster.getHddsDatanodes().get(0).getDatanodeDetails()
-          .getOzoneRestPort();
+          .getPort(DatanodeDetails.Port.Name.REST).getValue();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
index 29238cf..0254984 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachin
 import org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.hdds.scm.XceiverClient;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
@@ -44,6 +43,7 @@ import java.io.IOException;
 import java.util.HashSet;
 import java.util.List;
 
+import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
@@ -114,9 +114,9 @@ public class TestMiniOzoneCluster {
     DatanodeDetails id1 = TestUtils.getDatanodeDetails();
     DatanodeDetails id2 = TestUtils.getDatanodeDetails();
     DatanodeDetails id3 = TestUtils.getDatanodeDetails();
-    id1.setContainerPort(1);
-    id2.setContainerPort(2);
-    id3.setContainerPort(3);
+    id1.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 1));
+    id2.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 2));
+    id3.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 3));
 
     // Write a single ID to the file and read it out
     File validIdsFile = new File(WRITE_TMP, "valid-values.id");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index d2a6434..7046132 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -94,13 +94,19 @@ public final class ContainerTestHelper {
   public static DatanodeDetails createDatanodeDetails() throws IOException {
     ServerSocket socket = new ServerSocket(0);
     int port = socket.getLocalPort();
+    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.STANDALONE, port);
+    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.RATIS, port);
+    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.REST, port);
     DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder()
         .setUuid(UUID.randomUUID().toString())
         .setIpAddress(socket.getInetAddress().getHostAddress())
         .setHostName(socket.getInetAddress().getHostName())
-        .setContainerPort(port)
-        .setRatisPort(port)
-        .setOzoneRestPort(port)
+        .addPort(containerPort)
+        .addPort(ratisPort)
+        .addPort(restPort)
         .build();
 
     socket.close();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index 2921be2..ccad6f8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -65,7 +65,8 @@ public class TestContainerMetrics {
           .createSingleNodePipeline();
       OzoneConfiguration conf = new OzoneConfiguration();
       conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          pipeline.getLeader().getContainerPort());
+          pipeline.getLeader()
+              .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
       conf.setInt(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,
           interval);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 513974a..67a8160 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.container.ozoneimpl;
 
 import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -62,8 +63,8 @@ public class TestOzoneContainer {
       // We don't start Ozone Container via data node, we will do it
       // independently in our test path.
       Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
-      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          pipeline.getLeader().getContainerPort());
+      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getLeader()
+              .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
       conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
       container = new OzoneContainer(TestUtils.getDatanodeDetails(), conf);
       container.start();
@@ -101,7 +102,8 @@ public class TestOzoneContainer {
       Pipeline pipeline =
           ContainerTestHelper.createSingleNodePipeline();
       conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          pipeline.getLeader().getContainerPort());
+          pipeline.getLeader()
+              .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
 
       cluster = MiniOzoneCluster.newBuilder(conf)
           .setRandomContainerPort(false)
@@ -527,7 +529,8 @@ public class TestOzoneContainer {
     Pipeline pipeline =
         ContainerTestHelper.createSingleNodePipeline();
     conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-        pipeline.getLeader().getContainerPort());
+        pipeline.getLeader()
+            .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
 
     // This client talks to ozone container via datanode.
     return new XceiverClient(pipeline, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index eb170ea..d4c572f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -103,7 +103,8 @@ public class TestContainerServer {
     DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
     runTestClientServer(1,
         (pipeline, conf) -> conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-            pipeline.getLeader().getContainerPort()),
+            pipeline.getLeader()
+                .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()),
         XceiverClient::new,
         (dn, conf) -> new XceiverServer(datanodeDetails, conf,
             new TestContainerDispatcher()),
@@ -130,7 +131,7 @@ public class TestContainerServer {
   static XceiverServerRatis newXceiverServerRatis(
       DatanodeDetails dn, OzoneConfiguration conf) throws IOException {
     conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
-        dn.getRatisPort());
+        dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue());
     final String dir = TEST_DIR + dn.getUuid();
     conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
 
@@ -208,7 +209,8 @@ public class TestContainerServer {
       Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
       OzoneConfiguration conf = new OzoneConfiguration();
       conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          pipeline.getLeader().getContainerPort());
+          pipeline.getLeader()
+              .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
 
       Dispatcher dispatcher =
               new Dispatcher(mock(ContainerManager.class), conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java
index 2fb70f9..feb83d3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java
@@ -118,8 +118,9 @@ public class TestKeySpaceManagerRestInterface {
       switch (type) {
       case HTTP:
       case HTTPS:
-        Assert.assertEquals(datanodeDetails.getOzoneRestPort(),
-            (int) ports.get(type));
+        Assert.assertEquals(
+            datanodeDetails.getPort(DatanodeDetails.Port.Name.REST).getValue(),
+            ports.get(type));
         break;
       default:
         // KSM only sends Datanode's info port details

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java
index 1015ae1..0e61391 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.web;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -67,7 +68,8 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
     cluster = MiniOzoneCluster.newBuilder(conf).build();
     cluster.waitForClusterToBeReady();
     port = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails().getOzoneRestPort();
+        .getDatanodeDetails()
+        .getPort(DatanodeDetails.Port.Name.REST).getValue();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java
index 922587e..441f771 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.web;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.TestOzoneHelper;
@@ -70,7 +71,8 @@ public class TestLocalOzoneVolumes extends TestOzoneHelper {
     cluster = MiniOzoneCluster.newBuilder(conf).build();
     cluster.waitForClusterToBeReady();
     port = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails().getOzoneRestPort();
+        .getDatanodeDetails().getPort(
+            DatanodeDetails.Port.Name.REST).getValue();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
index 6c32f07..c014a60 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.web;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -78,7 +79,8 @@ public class TestOzoneWebAccess {
     cluster = MiniOzoneCluster.newBuilder(conf).build();
     cluster.waitForClusterToBeReady();
     port = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails().getOzoneRestPort();
+        .getDatanodeDetails().getPort(
+            DatanodeDetails.Port.Name.REST).getValue();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java
index 627826e..86de8df 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java
@@ -43,6 +43,7 @@ import io.netty.handler.codec.http.LastHttpContent;
 import io.netty.handler.logging.LogLevel;
 import io.netty.handler.logging.LoggingHandler;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -96,7 +97,8 @@ public class TestOzoneClient {
     cluster = MiniOzoneCluster.newBuilder(conf).build();
     cluster.waitForClusterToBeReady();
     int port = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails().getOzoneRestPort();
+        .getDatanodeDetails()
+        .getPort(DatanodeDetails.Port.Name.REST).getValue();
     endpoint = String.format("http://localhost:%d", port);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
index f8c7eec..a510430 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
@@ -81,8 +81,6 @@ public class TestVolume {
 
     cluster = MiniOzoneCluster.newBuilder(conf).build();
     cluster.waitForClusterToBeReady();
-    final int port = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails().getOzoneRestPort();
 
     client = new RpcClient(conf);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
index bc4ba25..dcb4030 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.web.client;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -63,7 +64,8 @@ public class TestVolumeRatis {
     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
     cluster.waitForClusterToBeReady();
     final int port = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails().getOzoneRestPort();
+        .getDatanodeDetails()
+        .getPort(DatanodeDetails.Port.Name.REST).getValue();
 
     client = new RpcClient(conf);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/OzoneHddsDatanodeService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/OzoneHddsDatanodeService.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/OzoneHddsDatanodeService.java
index 2283ba6..87b1e21 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/OzoneHddsDatanodeService.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/OzoneHddsDatanodeService.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.web;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.web.netty.ObjectStoreRestHttpServer;
@@ -51,8 +52,10 @@ public class OzoneHddsDatanodeService implements ServicePlugin {
         objectStoreRestHttpServer = new ObjectStoreRestHttpServer(
             conf, null, handler);
         objectStoreRestHttpServer.start();
-        hddsDatanodeService.getDatanodeDetails().setOzoneRestPort(
+        DatanodeDetails.Port restPort = DatanodeDetails.newPort(
+            DatanodeDetails.Port.Name.REST,
             objectStoreRestHttpServer.getHttpAddress().getPort());
+        hddsDatanodeService.getDatanodeDetails().setPort(restPort);
 
       } catch (IOException e) {
         throw new RuntimeException("Can't start the Object Store Rest server",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
index d0f0c9b..dc8fc91 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
@@ -21,6 +21,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.BlockingService;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -896,7 +897,8 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
 
       dnServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
           .setType(ServicePort.Type.HTTP)
-          .setValue(datanode.getOzoneRestPort())
+          .setValue(DatanodeDetails.getFromProtoBuf(datanode)
+              .getPort(DatanodeDetails.Port.Name.REST).getValue())
           .build());
 
       services.add(dnServiceInfoBuilder.build());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
index 611b62d..7f864ae 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
@@ -78,13 +78,19 @@ public final class GenesisUtil {
         random.nextInt(256) + "." + random.nextInt(256) + "." + random
             .nextInt(256) + "." + random.nextInt(256);
 
+    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.STANDALONE, 0);
+    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.RATIS, 0);
+    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.REST, 0);
     DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
     builder.setUuid(uuid)
         .setHostName("localhost")
         .setIpAddress(ipAddress)
-        .setContainerPort(0)
-        .setRatisPort(0)
-        .setOzoneRestPort(0);
+        .addPort(containerPort)
+        .addPort(ratisPort)
+        .addPort(restPort);
     return builder.build();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index 028b1fc..d4ac994 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -530,7 +530,8 @@ public class SQLCLI  extends Configured implements Tool {
         // but this seems a bit cleaner.
         String ipAddr = dd.getIpAddress();
         String hostName = dd.getHostName();
-        int containerPort = dd.getContainerPort();
+        int containerPort = DatanodeDetails.getFromProtoBuf(dd)
+            .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
         String insertMachineInfo = String.format(
             INSERT_DATANODE_INFO, hostName, uuid, ipAddr, containerPort);
         executeSQL(conn, insertMachineInfo);
@@ -598,7 +599,8 @@ public class SQLCLI  extends Configured implements Tool {
     String insertDatanodeDetails = String
         .format(INSERT_DATANODE_INFO, datanodeDetails.getHostName(),
             datanodeDetails.getUuidString(), datanodeDetails.getIpAddress(),
-            datanodeDetails.getContainerPort());
+            datanodeDetails.getPort(DatanodeDetails.Port.Name.STANDALONE)
+                .getValue());
     executeSQL(conn, insertDatanodeDetails);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34148c/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
index 4d8c9d6..b82c4a1 100644
--- a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
+++ b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
@@ -90,7 +90,8 @@ public class TestOzoneFSInputStream {
     // Fetch the host and port for File System init
     DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
         .getDatanodeDetails();
-    int port = datanodeDetails.getOzoneRestPort();
+    int port = datanodeDetails
+        .getPort(DatanodeDetails.Port.Name.REST).getValue();
     String host = datanodeDetails.getHostName();
 
     // Set the fs.defaultFS and start the filesystem




[19/50] [abbrv] hadoop git commit: YARN-8338. TimelineService V1.5 doesn't come up after HADOOP-15406. Contributed by Vinod Kumar Vavilapalli

Posted by xy...@apache.org.
YARN-8338. TimelineService V1.5 doesn't come up after HADOOP-15406. Contributed by Vinod Kumar Vavilapalli


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31ab960f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31ab960f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31ab960f

Branch: refs/heads/HDDS-4
Commit: 31ab960f4f931df273481927b897388895d803ba
Parents: 438ef49
Author: Jason Lowe <jl...@apache.org>
Authored: Tue May 29 11:00:30 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue May 29 11:00:30 2018 -0500

----------------------------------------------------------------------
 hadoop-project/pom.xml                                          | 5 +++++
 .../hadoop-yarn-server-applicationhistoryservice/pom.xml        | 5 +++++
 2 files changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ab960f/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 73c3f5b..59a9bd2 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1144,6 +1144,11 @@
         <version>1.8.5</version>
       </dependency>
       <dependency>
+        <groupId>org.objenesis</groupId>
+        <artifactId>objenesis</artifactId>
+        <version>1.0</version>
+      </dependency>
+      <dependency>
         <groupId>org.mock-server</groupId>
         <artifactId>mockserver-netty</artifactId>
         <version>3.9.2</version>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ab960f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
index f310518..0527095 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
@@ -155,6 +155,11 @@
       <artifactId>leveldbjni-all</artifactId>
     </dependency>
 
+    <dependency>
+      <groupId>org.objenesis</groupId>
+      <artifactId>objenesis</artifactId>
+    </dependency>
+
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.apache.hadoop</groupId>




[15/50] [abbrv] hadoop git commit: HDFS-13627. TestErasureCodingExerciseAPIs fails on Windows. Contributed by Anbang Hu.

Posted by xy...@apache.org.
HDFS-13627. TestErasureCodingExerciseAPIs fails on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91d7c74e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91d7c74e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91d7c74e

Branch: refs/heads/HDDS-4
Commit: 91d7c74e6aa4850922f68bab490b585443e4fccb
Parents: 7c34366
Author: Inigo Goiri <in...@apache.org>
Authored: Mon May 28 10:26:47 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Mon May 28 10:26:47 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java   | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91d7c74e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
index 4335527..c63ba34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
@@ -40,6 +40,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.DataOutputStream;
+import java.io.File;
 import java.io.IOException;
 import java.nio.file.Paths;
 import java.security.NoSuchAlgorithmException;
@@ -91,8 +92,10 @@ public class TestErasureCodingExerciseAPIs {
     // Set up java key store
     String testRootDir = Paths.get(new FileSystemTestHelper().getTestRootDir())
         .toString();
+    Path targetFile = new Path(new File(testRootDir).getAbsolutePath(),
+        "test.jks");
     String keyProviderURI = JavaKeyStoreProvider.SCHEME_NAME + "://file"
-        + new Path(testRootDir, "test.jks").toUri();
+        + targetFile.toUri();
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
         keyProviderURI);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
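
The failure mode being fixed is easiest to see with concrete values. A hypothetical Windows-style illustration, assuming JavaKeyStoreProvider.SCHEME_NAME resolves to "jceks":

    // testRootDir on Windows may be relative and backslash-separated,
    // e.g. "target\test\data". Taking new File(testRootDir).getAbsolutePath()
    // anchors it to a drive, and Path normalizes the separators:
    //   targetFile.toUri() -> "/C:/hadoop/target/test/data/test.jks"
    // yielding a well-formed provider URI such as
    //   "jceks://file/C:/hadoop/target/test/data/test.jks"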




[29/50] [abbrv] hadoop git commit: HADOOP-14946 S3Guard testPruneCommandCLI can fail. Contributed by Gabor Bota.

Posted by xy...@apache.org.
HADOOP-14946 S3Guard testPruneCommandCLI can fail. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30284d02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30284d02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30284d02

Branch: refs/heads/HDDS-4
Commit: 30284d020d36c502dad5bdbae61ec48e9dfe9f8c
Parents: 201440b
Author: Aaron Fabbri <fa...@apache.org>
Authored: Tue May 29 13:38:15 2018 -0700
Committer: Aaron Fabbri <fa...@apache.org>
Committed: Tue May 29 13:38:15 2018 -0700

----------------------------------------------------------------------
 .../s3guard/AbstractS3GuardToolTestBase.java    | 52 +++++++++++++++++---
 1 file changed, 44 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30284d02/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
index 4381749..2b43810 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
@@ -31,6 +31,7 @@ import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.util.StopWatch;
 import org.junit.Assume;
 import org.junit.Test;
 
@@ -61,6 +62,8 @@ public abstract class AbstractS3GuardToolTestBase extends AbstractS3ATestBase {
   protected static final String S3A_THIS_BUCKET_DOES_NOT_EXIST
       = "s3a://this-bucket-does-not-exist-00000000000";
 
+  private static final int PRUNE_MAX_AGE_SECS = 2;
+
   private MetadataStore ms;
 
   protected static void expectResult(int expected,
@@ -186,24 +189,57 @@ public abstract class AbstractS3GuardToolTestBase extends AbstractS3ATestBase {
     }
   }
 
+  /**
+   * Tests prune() with sleep() while avoiding flakiness when the test
+   * environment runs slowly. The test is basically:
+   * 1. Set max path age to X seconds
+   * 2. Create some files (which writes entries to MetadataStore)
+   * 3. Sleep X+2 seconds (all files from above are now "stale")
+   * 4. Create some other files (these are "fresh").
+   * 5. Run prune on MetadataStore.
+   * 6. Assert that only files that were created before the sleep() were pruned.
+   *
+   * Problem is: #6 can fail if X seconds elapse between steps 4 and 5, since
+   * the newer files also become stale and get pruned.  This is easy to
+   * reproduce by running all integration tests in parallel with a ton of
+   * threads, or anything else that slows down execution a lot.
+   *
+   * Solution: Keep track of time elapsed between #4 and #5, and if it
+   * exceeds X, just print a warn() message instead of failing.
+   *
+   * @param cmdConf configuration for command
+   * @param parent parent path under which the test files are created
+   * @param args command args
+   * @throws Exception on any failure
+   */
   private void testPruneCommand(Configuration cmdConf, Path parent,
       String...args) throws Exception {
     Path keepParent = path("prune-cli-keep");
+    StopWatch timer = new StopWatch();
     try {
-      getFileSystem().mkdirs(parent);
-      getFileSystem().mkdirs(keepParent);
-
       S3GuardTool.Prune cmd = new S3GuardTool.Prune(cmdConf);
       cmd.setMetadataStore(ms);
 
+      getFileSystem().mkdirs(parent);
+      getFileSystem().mkdirs(keepParent);
       createFile(new Path(parent, "stale"), true, true);
       createFile(new Path(keepParent, "stale-to-keep"), true, true);
-      Thread.sleep(TimeUnit.SECONDS.toMillis(2));
+
+      Thread.sleep(TimeUnit.SECONDS.toMillis(PRUNE_MAX_AGE_SECS + 2));
+
+      timer.start();
       createFile(new Path(parent, "fresh"), true, true);
 
       assertMetastoreListingCount(parent, "Children count before pruning", 2);
       exec(cmd, args);
-      assertMetastoreListingCount(parent, "Pruned children count", 1);
+      long msecElapsed = timer.now(TimeUnit.MILLISECONDS);
+      if (msecElapsed >= PRUNE_MAX_AGE_SECS * 1000) {
+        LOG.warn("Skipping an assertion: Test running too slowly ({} msec)",
+            msecElapsed);
+      } else {
+        assertMetastoreListingCount(parent, "Pruned children count remaining",
+            1);
+      }
       assertMetastoreListingCount(keepParent,
           "This child should have been kept (prefix restriction).", 1);
     } finally {
@@ -224,13 +260,14 @@ public abstract class AbstractS3GuardToolTestBase extends AbstractS3ATestBase {
   public void testPruneCommandCLI() throws Exception {
     Path testPath = path("testPruneCommandCLI");
     testPruneCommand(getFileSystem().getConf(), testPath,
-        "prune", "-seconds", "1", testPath.toString());
+        "prune", "-seconds", String.valueOf(PRUNE_MAX_AGE_SECS),
+        testPath.toString());
   }
 
   @Test
   public void testPruneCommandConf() throws Exception {
     getConfiguration().setLong(Constants.S3GUARD_CLI_PRUNE_AGE,
-        TimeUnit.SECONDS.toMillis(1));
+        TimeUnit.SECONDS.toMillis(PRUNE_MAX_AGE_SECS));
     Path testPath = path("testPruneCommandConf");
     testPruneCommand(getConfiguration(), testPath,
         "prune", testPath.toString());
@@ -286,7 +323,6 @@ public abstract class AbstractS3GuardToolTestBase extends AbstractS3ATestBase {
    * Execute a command, returning the buffer if the command actually completes.
    * If an exception is raised the output is logged instead.
    * @param cmd command
-   * @param buf buffer to use for tool output (not SLF4J output)
    * @param args argument list
    * @throws Exception on any failure
    */

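The fix above amounts to a timing guard: time the window between creating the "fresh" files and running prune, and downgrade the assertion to a warning when the machine was too slow for the result to mean anything. A stripped-down sketch of that guard, with a hypothetical checkResult() standing in for the real metastore assertion:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.StopWatch;

public class TimingGuardSketch {
  static final int MAX_AGE_SECS = 2; // mirrors PRUNE_MAX_AGE_SECS above

  public static void main(String[] args) throws Exception {
    StopWatch timer = new StopWatch();
    timer.start();
    doTimeSensitiveWork();
    long msecElapsed = timer.now(TimeUnit.MILLISECONDS);
    if (msecElapsed >= MAX_AGE_SECS * 1000L) {
      // Too slow for the assertion to be meaningful: warn instead of failing.
      System.err.println("Skipping assertion: test ran too slowly ("
          + msecElapsed + " msec)");
    } else {
      checkResult();
    }
  }

  static void doTimeSensitiveWork() throws InterruptedException {
    Thread.sleep(10); // placeholder for the file creation + prune steps
  }

  static void checkResult() {
    // placeholder for assertMetastoreListingCount(...)
  }
}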



[20/50] [abbrv] hadoop git commit: HADOOP-15497. TestTrash should use proper test path to avoid failing on Windows. Contributed by Anbang Hu.

HADOOP-15497. TestTrash should use proper test path to avoid failing on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c75f8e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c75f8e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c75f8e4

Branch: refs/heads/HDDS-4
Commit: 3c75f8e4933221fa60a87e86a3db5e4727530b6f
Parents: 31ab960
Author: Inigo Goiri <in...@apache.org>
Authored: Tue May 29 09:11:08 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue May 29 09:11:08 2018 -0700

----------------------------------------------------------------------
 .../src/test/java/org/apache/hadoop/fs/TestTrash.java     | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c75f8e4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 12aed29..fa2d21f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -49,9 +49,11 @@ import org.apache.hadoop.util.Time;
  */
 public class TestTrash {
 
-  private final static Path TEST_DIR = new Path(GenericTestUtils.getTempPath(
+  private final static File BASE_PATH = new File(GenericTestUtils.getTempPath(
       "testTrash"));
 
+  private final static Path TEST_DIR = new Path(BASE_PATH.getAbsolutePath());
+
   @Before
   public void setUp() throws IOException {
     // ensure each test initiates a FileSystem instance,
@@ -682,7 +684,7 @@ public class TestTrash {
   static class TestLFS extends LocalFileSystem {
     Path home;
     TestLFS() {
-      this(new Path(TEST_DIR, "user/test"));
+      this(TEST_DIR);
     }
     TestLFS(final Path home) {
       super(new RawLocalFileSystem() {
@@ -809,8 +811,8 @@ public class TestTrash {
    */
   public static void verifyTrashPermission(FileSystem fs, Configuration conf)
       throws IOException {
-    Path caseRoot = new Path(
-        GenericTestUtils.getTempPath("testTrashPermission"));
+    Path caseRoot = new Path(BASE_PATH.getPath(),
+        "testTrashPermission");
     try (FileSystem fileSystem = fs){
       Trash trash = new Trash(fileSystem, conf);
       FileSystemTestWrapper wrapper =


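The Windows fix above keeps a single temp root as a java.io.File and derives every Hadoop Path from its absolute form, so paths come out drive-qualified on Windows. A tiny sketch of the same idea, using the JDK temp dir in place of GenericTestUtils.getTempPath():

import java.io.File;
import org.apache.hadoop.fs.Path;

public class SharedTestRoot {
  // One base for the whole test class; getAbsolutePath() is what makes the
  // Path well-formed on Windows (e.g. C:\... instead of a bare \tmp\...).
  private static final File BASE_PATH =
      new File(System.getProperty("java.io.tmpdir"), "testTrash");
  private static final Path TEST_DIR = new Path(BASE_PATH.getAbsolutePath());

  public static void main(String[] args) {
    Path caseRoot = new Path(BASE_PATH.getPath(), "testTrashPermission");
    System.out.println(TEST_DIR + " / " + caseRoot);
  }
}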


[21/50] [abbrv] hadoop git commit: YARN-8339. Service AM should localize static/archive resource types to container working directory instead of 'resources'. (Suma Shivaprasad via wangda)

YARN-8339. Service AM should localize static/archive resource types to container working directory instead of 'resources'. (Suma Shivaprasad via wangda)

Change-Id: I9f8e8f621650347f6c2f9e3420edee9eb2f356a4


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3061bfcd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3061bfcd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3061bfcd

Branch: refs/heads/HDDS-4
Commit: 3061bfcde53210d2032df3814243498b27a997b7
Parents: 3c75f8e
Author: Wangda Tan <wa...@apache.org>
Authored: Tue May 29 09:23:11 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue May 29 09:23:11 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/yarn/service/provider/ProviderUtils.java | 3 +--
 .../apache/hadoop/yarn/service/provider/TestProviderUtils.java | 6 +++---
 2 files changed, 4 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3061bfcd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
index 1ad5fd8..ac90992 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
@@ -298,8 +298,7 @@ public class ProviderUtils implements YarnServiceConstants {
         destFile = new Path(staticFile.getDestFile());
       }
 
-      String symlink = APP_RESOURCES_DIR + "/" + destFile.getName();
-      addLocalResource(launcher, symlink, localResource, destFile);
+      addLocalResource(launcher, destFile.getName(), localResource, destFile);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3061bfcd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
index 6e8bc43..5d794d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
@@ -154,11 +154,11 @@ public class TestProviderUtils {
 
     ProviderUtils.handleStaticFilesForLocalization(launcher, sfs,
         compLaunchCtx);
-    Mockito.verify(launcher).addLocalResource(Mockito.eq("resources/destFile1"),
+    Mockito.verify(launcher).addLocalResource(Mockito.eq("destFile1"),
         any(LocalResource.class));
     Mockito.verify(launcher).addLocalResource(
-        Mockito.eq("resources/destFile_2"), any(LocalResource.class));
+        Mockito.eq("destFile_2"), any(LocalResource.class));
     Mockito.verify(launcher).addLocalResource(
-        Mockito.eq("resources/sourceFile4"), any(LocalResource.class));
+        Mockito.eq("sourceFile4"), any(LocalResource.class));
   }
 }

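The behavioral change above is just the symlink name: the final path component alone, so the resource lands in the container working directory rather than under resources/. Path.getName() supplies that component; a short illustrative sketch (the URI is made up):

import org.apache.hadoop.fs.Path;

public class SymlinkName {
  public static void main(String[] args) {
    Path destFile = new Path("hdfs://nn:8020/apps/app1/destFile1"); // illustrative
    // After the fix the symlink is just "destFile1"; before it was
    // APP_RESOURCES_DIR + "/" + destFile.getName().
    System.out.println(destFile.getName()); // prints destFile1
  }
}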



[33/50] [abbrv] hadoop git commit: HDFS-13626. Fix incorrect username when deny the setOwner operation. Contributed by Zsolt Venczel.

HDFS-13626. Fix incorrect username when deny the setOwner operation. Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b24098bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b24098bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b24098bc

Branch: refs/heads/HDDS-4
Commit: b24098bc8ffe976d662acabc168e20eac8cc8460
Parents: 5f6769f
Author: Yiqun Lin <yq...@apache.org>
Authored: Wed May 30 16:52:21 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Wed May 30 16:52:21 2018 +0800

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/FSDirAttrOp.java       |  4 ++--
 .../org/apache/hadoop/security/TestPermission.java     | 13 ++++++++-----
 2 files changed, 10 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b24098bc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 406fe80..1dbee96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -82,12 +82,12 @@ public class FSDirAttrOp {
       fsd.checkOwner(pc, iip);
       if (!pc.isSuperUser()) {
         if (username != null && !pc.getUser().equals(username)) {
-          throw new AccessControlException("User " + username
+          throw new AccessControlException("User " + pc.getUser()
               + " is not a super user (non-super user cannot change owner).");
         }
         if (group != null && !pc.isMemberOfGroup(group)) {
           throw new AccessControlException(
-              "User " + username + " does not belong to " + group);
+              "User " + pc.getUser() + " does not belong to " + group);
         }
       }
       unprotectedSetOwner(fsd, iip, username, group);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b24098bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 813ac5a..388e7f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -337,7 +337,8 @@ public class TestPermission {
       fail("Expect ACE when a non-super user tries to change a file to a " +
           "group where the user does not belong.");
     } catch (AccessControlException e) {
-      assertThat(e.getMessage(), startsWith("User null does not belong to"));
+      assertThat(e.getMessage(), startsWith("User " +
+          userfs.getFileStatus(file).getOwner() + " does not belong to"));
     }
   }
 
@@ -371,8 +372,9 @@ public class TestPermission {
       userfs.setOwner(file, NOUSER, null);
       fail("Expect ACE when a non-super user tries to change owner");
     } catch (AccessControlException e) {
-      assertThat(e.getMessage(), startsWith("User " + NOUSER
-          + " is not a super user (non-super user cannot change owner)"));
+      assertThat(e.getMessage(), startsWith("User " +
+          userfs.getFileStatus(file).getOwner() +
+          " is not a super user (non-super user cannot change owner)"));
     }
   }
 
@@ -397,8 +399,9 @@ public class TestPermission {
       fail("Expect ACE or FNFE when a non-super user tries to change owner " +
           "for a non-existent file");
     } catch (AccessControlException e) {
-      assertThat(e.getMessage(), startsWith("User " + NOUSER
-          + " is not a super user (non-super user cannot change owner)"));
+      assertThat(e.getMessage(), startsWith("User " +
+          userfs.getFileStatus(file).getOwner() +
+          " is not a super user (non-super user cannot change owner)"));
     } catch (FileNotFoundException e) {
     }
   }

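The corrected messages name the actual caller (pc.getUser()) instead of the requested new owner, which can legitimately be null. A self-contained sketch of the same guard, with plain parameters standing in for the permission-checker state:

import org.apache.hadoop.security.AccessControlException;

public class SetOwnerGuard {
  // caller: the authenticated user; username/group: the requested new owner.
  static void check(String caller, boolean isSuperUser, boolean isMemberOfGroup,
      String username, String group) throws AccessControlException {
    if (!isSuperUser) {
      if (username != null && !caller.equals(username)) {
        // Report the caller, not the (possibly null) requested owner.
        throw new AccessControlException("User " + caller
            + " is not a super user (non-super user cannot change owner).");
      }
      if (group != null && !isMemberOfGroup) {
        throw new AccessControlException(
            "User " + caller + " does not belong to " + group);
      }
    }
  }
}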



[25/50] [abbrv] hadoop git commit: YARN-8329. Docker client configuration can still be set incorrectly. Contributed by Shane Kumpf

YARN-8329. Docker client configuration can still be set incorrectly. Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4827e9a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4827e9a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4827e9a9

Branch: refs/heads/HDDS-4
Commit: 4827e9a9085b306bc379cb6e0b1fe4b92326edcd
Parents: e3236a9
Author: Jason Lowe <jl...@apache.org>
Authored: Tue May 29 14:43:17 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue May 29 14:43:17 2018 -0500

----------------------------------------------------------------------
 .../yarn/util/DockerClientConfigHandler.java    | 23 +++++++++++---------
 .../security/TestDockerClientConfigHandler.java |  4 ++--
 .../runtime/DockerLinuxContainerRuntime.java    |  7 +++---
 3 files changed, 19 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4827e9a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
index 5522cf4..8ec4deb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
@@ -154,14 +154,15 @@ public final class DockerClientConfigHandler {
    * @param outConfigFile the File to write the Docker client configuration to.
    * @param credentials the populated Credentials object.
    * @throws IOException if the write fails.
+   * @return true if a Docker credential is found in the supplied credentials.
    */
-  public static void writeDockerCredentialsToPath(File outConfigFile,
+  public static boolean writeDockerCredentialsToPath(File outConfigFile,
       Credentials credentials) throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    ObjectNode rootNode = mapper.createObjectNode();
-    ObjectNode registryUrlNode = mapper.createObjectNode();
     boolean foundDockerCred = false;
     if (credentials.numberOfTokens() > 0) {
+      ObjectMapper mapper = new ObjectMapper();
+      ObjectNode rootNode = mapper.createObjectNode();
+      ObjectNode registryUrlNode = mapper.createObjectNode();
       for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
         if (tk.getKind().equals(DockerCredentialTokenIdentifier.KIND)) {
           foundDockerCred = true;
@@ -176,12 +177,14 @@ public final class DockerClientConfigHandler {
           }
         }
       }
+      if (foundDockerCred) {
+        rootNode.put(CONFIG_AUTHS_KEY, registryUrlNode);
+        String json = mapper.writerWithDefaultPrettyPrinter()
+            .writeValueAsString(rootNode);
+        FileUtils.writeStringToFile(
+            outConfigFile, json, StandardCharsets.UTF_8);
+      }
     }
-    if (foundDockerCred) {
-      rootNode.put(CONFIG_AUTHS_KEY, registryUrlNode);
-      String json =
-          mapper.writerWithDefaultPrettyPrinter().writeValueAsString(rootNode);
-      FileUtils.writeStringToFile(outConfigFile, json, StandardCharsets.UTF_8);
-    }
+    return foundDockerCred;
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4827e9a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestDockerClientConfigHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestDockerClientConfigHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestDockerClientConfigHandler.java
index c4cbe45..cfe5a45 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestDockerClientConfigHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestDockerClientConfigHandler.java
@@ -116,8 +116,8 @@ public class TestDockerClientConfigHandler {
     Credentials credentials =
         DockerClientConfigHandler.readCredentialsFromConfigFile(
             new Path(file.toURI()), conf, APPLICATION_ID);
-    DockerClientConfigHandler.writeDockerCredentialsToPath(outFile,
-        credentials);
+    assertTrue(DockerClientConfigHandler.writeDockerCredentialsToPath(outFile,
+        credentials));
     assertTrue(outFile.exists());
     String fileContents = FileUtils.readFileToString(outFile);
     assertTrue(fileContents.contains("auths"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4827e9a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 5e2233b..fc095d5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -1299,14 +1299,15 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
                   .getParent();
           File dockerConfigPath = new File(nmPrivateDir + "/config.json");
           try {
-            DockerClientConfigHandler
-                .writeDockerCredentialsToPath(dockerConfigPath, credentials);
+            if (DockerClientConfigHandler
+                .writeDockerCredentialsToPath(dockerConfigPath, credentials)) {
+              dockerRunCommand.setClientConfigDir(dockerConfigPath.getParent());
+            }
           } catch (IOException e) {
             throw new ContainerExecutionException(
                 "Unable to write Docker client credentials to "
                     + dockerConfigPath);
           }
-          dockerRunCommand.setClientConfigDir(dockerConfigPath.getParent());
         }
       }
     }

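The shape of this fix is "write only when something was found, and tell the caller": the writer returns a boolean and the runtime sets the client config dir only on true, so an empty config.json is never referenced. A minimal sketch of that contract (writeIfAny() and the JSON body are illustrative, not the real handler):

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.Collections;
import java.util.Map;

public class ConditionalConfigWriter {
  static boolean writeIfAny(File out, Map<String, String> creds)
      throws IOException {
    if (creds.isEmpty()) {
      return false; // nothing to write; caller must not point Docker at it
    }
    Files.write(out.toPath(),
        "{ \"auths\" : { } }".getBytes(StandardCharsets.UTF_8));
    return true;
  }

  public static void main(String[] args) throws IOException {
    File config = new File("config.json");
    if (writeIfAny(config, Collections.emptyMap())) {
      // only here would dockerRunCommand.setClientConfigDir(...) be called
      System.out.println("config dir: " + config.getParent());
    } else {
      System.out.println("no Docker credentials found");
    }
  }
}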



[43/50] [abbrv] hadoop git commit: YARN-8333. Load balance YARN services using RegistryDNS multiple A records. Contributed by Eric Yang

YARN-8333. Load balance YARN services using RegistryDNS multiple A records. Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bc92e30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bc92e30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bc92e30

Branch: refs/heads/HDDS-4
Commit: 6bc92e304fe05e80f13830104d1fd2c59da8344b
Parents: 6468071
Author: Billie Rinaldi <bi...@apache.org>
Authored: Thu May 31 06:46:34 2018 -0700
Committer: Billie Rinaldi <bi...@apache.org>
Committed: Thu May 31 06:46:34 2018 -0700

----------------------------------------------------------------------
 .../server/dns/BaseServiceRecordProcessor.java  | 20 +++++++
 .../dns/ContainerServiceRecordProcessor.java    |  3 +-
 .../registry/server/dns/TestRegistryDNS.java    | 62 ++++++++++++++++----
 .../markdown/yarn-service/ServiceDiscovery.md   | 14 ++++-
 4 files changed, 84 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bc92e30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/BaseServiceRecordProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/BaseServiceRecordProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/BaseServiceRecordProcessor.java
index 51ae99a..f30c0c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/BaseServiceRecordProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/BaseServiceRecordProcessor.java
@@ -290,6 +290,26 @@ public abstract class BaseServiceRecordProcessor
           domain));
     }
 
+    /**
+     * Return the DNS name constructed from the component name.
+     *
+     * @return the DNS name.
+     * @throws PathNotFoundException
+     * @throws TextParseException
+     */
+    protected Name getComponentName()
+        throws PathNotFoundException, TextParseException {
+      String service = RegistryPathUtils.lastPathEntry(
+          RegistryPathUtils.parentOf(RegistryPathUtils.parentOf(getPath())));
+      String component = getRecord().get("yarn:component").toLowerCase();
+      String user = RegistryPathUtils.getUsername(getPath());
+      return Name.fromString(MessageFormat.format("{0}.{1}.{2}.{3}",
+          component,
+          service,
+          user,
+          domain));
+    }
+
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bc92e30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ContainerServiceRecordProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ContainerServiceRecordProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ContainerServiceRecordProcessor.java
index 2e95f54..e40a177 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ContainerServiceRecordProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ContainerServiceRecordProcessor.java
@@ -242,7 +242,8 @@ public class ContainerServiceRecordProcessor extends
       }
       try {
         this.setTarget(InetAddress.getByName(ip));
-        this.setNames(new Name[] {getContainerName(), getContainerIDName()});
+        this.setNames(new Name[] {getContainerName(), getContainerIDName(),
+            getComponentName()});
       } catch (Exception e) {
         throw new IllegalStateException(e);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bc92e30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
index bce73ad..01adc45 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
@@ -115,32 +115,47 @@ public class TestRegistryDNS extends Assert {
       + "}\n";
   static final String CONTAINER_RECORD = "{\n"
       + "  \"type\" : \"JSONServiceRecord\",\n"
-      + "  \"description\" : \"COMP-NAME\",\n"
+      + "  \"description\" : \"httpd-1\",\n"
       + "  \"external\" : [ ],\n"
       + "  \"internal\" : [ ],\n"
       + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000002\",\n"
       + "  \"yarn:persistence\" : \"container\",\n"
       + "  \"yarn:ip\" : \"172.17.0.19\",\n"
-      + "  \"yarn:hostname\" : \"0a134d6329ba\"\n"
+      + "  \"yarn:hostname\" : \"host1\",\n"
+      + "  \"yarn:component\" : \"httpd\"\n"
+      + "}\n";
+
+  static final String CONTAINER_RECORD2 = "{\n"
+      + "  \"type\" : \"JSONServiceRecord\",\n"
+      + "  \"description\" : \"httpd-2\",\n"
+      + "  \"external\" : [ ],\n"
+      + "  \"internal\" : [ ],\n"
+      + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000003\",\n"
+      + "  \"yarn:persistence\" : \"container\",\n"
+      + "  \"yarn:ip\" : \"172.17.0.20\",\n"
+      + "  \"yarn:hostname\" : \"host2\",\n"
+      + "  \"yarn:component\" : \"httpd\"\n"
       + "}\n";
 
   private static final String CONTAINER_RECORD_NO_IP = "{\n"
       + "  \"type\" : \"JSONServiceRecord\",\n"
-      + "  \"description\" : \"COMP-NAME\",\n"
+      + "  \"description\" : \"httpd-1\",\n"
       + "  \"external\" : [ ],\n"
       + "  \"internal\" : [ ],\n"
       + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000002\",\n"
-      + "  \"yarn:persistence\" : \"container\"\n"
+      + "  \"yarn:persistence\" : \"container\",\n"
+      + "  \"yarn:component\" : \"httpd\"\n"
       + "}\n";
 
   private static final String CONTAINER_RECORD_YARN_PERSISTANCE_ABSENT = "{\n"
       + "  \"type\" : \"JSONServiceRecord\",\n"
-      + "  \"description\" : \"COMP-NAME\",\n"
+      + "  \"description\" : \"httpd-1\",\n"
       + "  \"external\" : [ ],\n"
       + "  \"internal\" : [ ],\n"
       + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000003\",\n"
       + "  \"yarn:ip\" : \"172.17.0.19\",\n"
-      + "  \"yarn:hostname\" : \"0a134d6329bb\"\n"
+      + "  \"yarn:hostname\" : \"0a134d6329bb\",\n"
+      + "  \"yarn:component\" : \"httpd\""
       + "}\n";
 
   @Before
@@ -229,7 +244,7 @@ public class TestRegistryDNS extends Assert {
     assertEquals("wrong result", "172.17.0.19",
         ((ARecord) recs[0]).getAddress().getHostAddress());
 
-    recs = assertDNSQuery("comp-name.test1.root.dev.test.", 1);
+    recs = assertDNSQuery("httpd-1.test1.root.dev.test.", 1);
     assertTrue("not an ARecord", recs[0] instanceof ARecord);
   }
 
@@ -268,7 +283,7 @@ public class TestRegistryDNS extends Assert {
         ((ARecord) recs[0]).getAddress().getHostAddress());
     assertEquals("wrong ttl", 30L, recs[0].getTTL());
 
-    recs = assertDNSQuery("comp-name.test1.root.dev.test.", 1);
+    recs = assertDNSQuery("httpd-1.test1.root.dev.test.", 1);
     assertTrue("not an ARecord", recs[0] instanceof ARecord);
 
     assertEquals("wrong ttl", 30L, recs[0].getTTL());
@@ -286,7 +301,7 @@ public class TestRegistryDNS extends Assert {
     // start assessing whether correct records are available
     Record[] recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1);
     assertEquals("wrong result",
-        "comp-name.test1.root.dev.test.",
+        "httpd-1.test1.root.dev.test.",
         ((PTRRecord) recs[0]).getTarget().toString());
   }
 
@@ -312,7 +327,7 @@ public class TestRegistryDNS extends Assert {
     // start assessing whether correct records are available
     Record[] recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1);
     assertEquals("wrong result",
-        "comp-name.test1.root.dev.test.",
+        "httpd-1.test1.root.dev.test.",
         ((PTRRecord) recs[0]).getTarget().toString());
   }
 
@@ -490,7 +505,7 @@ public class TestRegistryDNS extends Assert {
     assertEquals("wrong result", "172.17.0.19",
         ((AAAARecord) recs[0]).getAddress().getHostAddress());
 
-    recs = assertDNSQuery("comp-name.test1.root.dev.test.", Type.AAAA, 1);
+    recs = assertDNSQuery("httpd-1.test1.root.dev.test.", Type.AAAA, 1);
     assertTrue("not an ARecord", recs[0] instanceof AAAARecord);
   }
 
@@ -565,13 +580,13 @@ public class TestRegistryDNS extends Assert {
     assertEquals("wrong result", "172.17.0.19",
         ((ARecord) recs[0]).getAddress().getHostAddress());
 
-    recs = assertDNSQuery("comp-name.test1.root.dev.test.", 1);
+    recs = assertDNSQuery("httpd-1.test1.root.dev.test.", 1);
     assertTrue("not an ARecord", recs[0] instanceof ARecord);
 
     // lookup dynamic reverse records
     recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1);
     assertEquals("wrong result",
-        "comp-name.test1.root.dev.test.",
+        "httpd-1.test1.root.dev.test.",
         ((PTRRecord) recs[0]).getTarget().toString());
 
     // now lookup static reverse records
@@ -649,6 +664,27 @@ public class TestRegistryDNS extends Assert {
         assertDNSQueryNotNull("mail.yahoo.com.", Type.CNAME);
   }
 
+  @Test
+  public void testMultiARecord() throws Exception {
+    ServiceRecord record = getMarshal().fromBytes("somepath",
+        CONTAINER_RECORD.getBytes());
+    ServiceRecord record2 = getMarshal().fromBytes("somepath",
+        CONTAINER_RECORD2.getBytes());
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "ctr-e50-1451931954322-0016-01-000002",
+        record);
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "ctr-e50-1451931954322-0016-01-000003",
+        record2);
+
+    // start assessing whether correct records are available
+    Record[] recs =
+        assertDNSQuery("httpd.test1.root.dev.test.", 2);
+    assertTrue("not an ARecord", recs[0] instanceof ARecord);
+    assertTrue("not an ARecord", recs[1] instanceof ARecord);
+  }
   public RegistryDNS getRegistryDNS() {
     return registryDNS;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bc92e30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md
index f351e23..7ee16dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md
@@ -65,6 +65,18 @@ Note that YARN service framework assigns `COMPONENT_INSTANCE_NAME` for each cont
 assigned `0` since it is the first and only instance for the `hbasemaster` component. In case of `regionserver` component, it can have multiple containers
  and so be named as such: `regionserver-0`, `regionserver-1`, `regionserver-2` ... etc
 
+Each YARN service component also has Multi-A Records for container fault tolerance or load balancing via RegistryDNS.  The naming format is defined as:
+```
+${COMPONENT_NAME}.${SERVICE_NAME}.${USER}.${DOMAIN}
+```
+
+For example, a component named www in an application named app, launched by user chuck with 3 containers, will have DNS records that look like:
+```
+www.app.chuck.example.com IN A 123.123.123.1
+www.app.chuck.example.com IN A 123.123.123.2
+www.app.chuck.example.com IN A 123.123.123.3
+```
+
 `Disclaimer`: The DNS implementation is still experimental. It should not be used as a fully-functional DNS.
 
 
@@ -140,4 +152,4 @@ You can edit the `/etc/resolv.conf` to make your system use the registry DNS suc
 ```
 nameserver 192.168.154.3
 ```
-Alternatively, if you have a corporate DNS in your organization, you can configure zone forwarding so that the Registry DNS resolves hostnames for the domain used by the cluster.
\ No newline at end of file
+Alternatively, if you have a corporate DNS in your organization, you can configure zone forwarding so that the Registry DNS resolves hostnames for the domain used by the cluster.

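On the client side, a multi-A name such as www.app.chuck.example.com resolves to one address per live container, so callers can spread load across them. A small sketch using plain JDK resolution (the hostname is the illustrative one from the docs above):

import java.net.InetAddress;
import java.util.concurrent.ThreadLocalRandom;

public class MultiALookup {
  public static void main(String[] args) throws Exception {
    // RegistryDNS publishes one A record per container under the component name.
    InetAddress[] records = InetAddress.getAllByName("www.app.chuck.example.com");
    // Naive load balancing: pick one at random; a real client would retry others.
    InetAddress pick = records[ThreadLocalRandom.current().nextInt(records.length)];
    System.out.println(records.length + " records, using " + pick.getHostAddress());
  }
}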



[16/50] [abbrv] hadoop git commit: HDFS-13591. TestDFSShell#testSetrepLow fails on Windows. Contributed by Anbang Hu.

HDFS-13591. TestDFSShell#testSetrepLow fails on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9dbf4f01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9dbf4f01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9dbf4f01

Branch: refs/heads/HDDS-4
Commit: 9dbf4f01665d5480a70395a24519cbab5d4db0c5
Parents: 91d7c74
Author: Inigo Goiri <in...@apache.org>
Authored: Mon May 28 16:34:02 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Mon May 28 16:34:02 2018 -0700

----------------------------------------------------------------------
 .../test/java/org/apache/hadoop/hdfs/TestDFSShell.java    | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dbf4f01/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index e82863a..c352dc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -2829,11 +2829,11 @@ public class TestDFSShell {
         System.setErr(origErr);
       }
 
-      assertEquals("Error message is not the expected error message",
-          "setrep: Requested replication factor of 1 is less than "
-              + "the required minimum of 2 for /tmp/TestDFSShell-"
-              + "testSetrepLow/testFileForSetrepLow\n",
-          bao.toString());
+      assertTrue("Error message is not the expected error message"
+          + bao.toString(), bao.toString().startsWith(
+              "setrep: Requested replication factor of 1 is less than "
+                  + "the required minimum of 2 for /tmp/TestDFSShell-"
+                  + "testSetrepLow/testFileForSetrepLow"));
     } finally {
       shell.close();
       cluster.shutdown();




[05/50] [abbrv] hadoop git commit: HDFS-13619. TestAuditLoggerWithCommands fails on Windows. Contributed by Anbang Hu.

HDFS-13619. TestAuditLoggerWithCommands fails on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13d25289
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13d25289
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13d25289

Branch: refs/heads/HDDS-4
Commit: 13d25289076b39daf481fb1ee15939dbfe4a6b23
Parents: 8733012
Author: Inigo Goiri <in...@apache.org>
Authored: Fri May 25 13:32:34 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri May 25 13:32:34 2018 -0700

----------------------------------------------------------------------
 .../hdfs/server/namenode/TestAuditLoggerWithCommands.java       | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d25289/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
index 41ee03f..222a1de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
@@ -1264,8 +1264,9 @@ public class TestAuditLoggerWithCommands {
   }
 
   private int verifyAuditLogs(String pattern) {
-    int length = auditlog.getOutput().split("\n").length;
-    String lastAudit = auditlog.getOutput().split("\n")[length - 1];
+    int length = auditlog.getOutput().split(System.lineSeparator()).length;
+    String lastAudit = auditlog.getOutput()
+        .split(System.lineSeparator())[length - 1];
     assertTrue("Unexpected log!", lastAudit.matches(pattern));
     return length;
   }

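Both of the Windows test fixes above come down to not hard-coding "\n": split captured output on System.lineSeparator(), and match expected text with startsWith() so a trailing "\r" cannot break an equality check. A compact sketch:

public class LineEndingSafeChecks {
  public static void main(String[] args) {
    String output = "audit-1" + System.lineSeparator()
        + "audit-2" + System.lineSeparator();
    // split("\n") would leave a trailing "\r" on Windows; this does not.
    String[] lines = output.split(System.lineSeparator());
    String last = lines[lines.length - 1];
    // Prefix matching tolerates platform-specific trailing separators.
    System.out.println(last.startsWith("audit-2")); // true
  }
}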



[18/50] [abbrv] hadoop git commit: HADOOP-15455. Incorrect debug message in KMSACL#hasAccess. Contributed by Yuen-Kuei Hsueh.

HADOOP-15455. Incorrect debug message in KMSACL#hasAccess. Contributed by Yuen-Kuei Hsueh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/438ef495
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/438ef495
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/438ef495

Branch: refs/heads/HDDS-4
Commit: 438ef4951a38171f193eaf2631da31d0f4bc3c62
Parents: 8fdc993
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Mon May 28 17:32:32 2018 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Mon May 28 17:32:32 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java    | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/438ef495/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
index b02f34e..17faec2 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
@@ -247,9 +247,9 @@ public class KMSACLs implements Runnable, KeyACLs {
         if (blacklist == null) {
           LOG.debug("No blacklist for {}", type.toString());
         } else if (access) {
-          LOG.debug("user is in {}" , blacklist.getAclString());
-        } else {
           LOG.debug("user is not in {}" , blacklist.getAclString());
+        } else {
+          LOG.debug("user is in {}" , blacklist.getAclString());
         }
       }
     }




[31/50] [abbrv] hadoop git commit: YARN-8362. Bugfix logic in container retries in node manager. Contributed by Chandni Singh

YARN-8362. Bugfix logic in container retries in node manager. Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/135941e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/135941e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/135941e0

Branch: refs/heads/HDDS-4
Commit: 135941e00d762a417c3b4cc524cdc59b0d1810b1
Parents: 2416906
Author: Eric Yang <ey...@apache.org>
Authored: Tue May 29 16:56:58 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Tue May 29 16:56:58 2018 -0400

----------------------------------------------------------------------
 .../container/ContainerImpl.java                |  4 +-
 .../container/SlidingWindowRetryPolicy.java     | 62 +++++++++++---------
 .../container/TestSlidingWindowRetryPolicy.java |  6 ++
 3 files changed, 44 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/135941e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index c09c7f1..5527ac4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -1602,8 +1602,10 @@ public class ContainerImpl implements Container {
         }
         container.addDiagnostics(exitEvent.getDiagnosticInfo() + "\n");
       }
-
       if (container.shouldRetry(container.exitCode)) {
+        // Updates to the retry context should be protected from concurrent
+        // writes. It should only be called from this transition.
+        container.retryPolicy.updateRetryContext(container.windowRetryContext);
         container.storeRetryContext();
         doRelaunch(container,
             container.windowRetryContext.getRemainingRetries(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/135941e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
index 0208879..36a8b91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
@@ -42,49 +42,40 @@ public class SlidingWindowRetryPolicy {
 
   public boolean shouldRetry(RetryContext retryContext,
       int errorCode) {
-    ContainerRetryContext containerRC = retryContext
-        .containerRetryContext;
+    ContainerRetryContext containerRC = retryContext.containerRetryContext;
     Preconditions.checkNotNull(containerRC, "container retry context null");
     ContainerRetryPolicy retryPolicy = containerRC.getRetryPolicy();
     if (retryPolicy == ContainerRetryPolicy.RETRY_ON_ALL_ERRORS
         || (retryPolicy == ContainerRetryPolicy.RETRY_ON_SPECIFIC_ERROR_CODES
         && containerRC.getErrorCodes() != null
         && containerRC.getErrorCodes().contains(errorCode))) {
-      if (containerRC.getMaxRetries() == ContainerRetryContext.RETRY_FOREVER) {
-        return true;
-      }
-      int pendingRetries = calculatePendingRetries(retryContext);
-      updateRetryContext(retryContext, pendingRetries);
-      return pendingRetries > 0;
+      return containerRC.getMaxRetries() == ContainerRetryContext.RETRY_FOREVER
+          || calculateRemainingRetries(retryContext) > 0;
     }
     return false;
   }
 
   /**
-   * Calculates the pending number of retries.
-   * <p>
-   * When failuresValidityInterval is > 0, it also removes time entries from
-   * <code>restartTimes</code> which are outside the validity interval.
+   * Calculates the remaining number of retries.
    *
-   * @return the pending retries.
+   * @return the remaining retries.
    */
-  private int calculatePendingRetries(RetryContext retryContext) {
+  private int calculateRemainingRetries(RetryContext retryContext) {
     ContainerRetryContext containerRC =
         retryContext.containerRetryContext;
     if (containerRC.getFailuresValidityInterval() > 0) {
-      Iterator<Long> iterator = retryContext.getRestartTimes().iterator();
+      int validFailuresCount = 0;
       long currentTime = clock.getTime();
-      while (iterator.hasNext()) {
-        long restartTime = iterator.next();
+      for (int i = retryContext.restartTimes.size() - 1; i >= 0; i--) {
+        long restartTime = retryContext.restartTimes.get(i);
         if (currentTime - restartTime
-            > containerRC.getFailuresValidityInterval()) {
-          iterator.remove();
+            <= containerRC.getFailuresValidityInterval()) {
+          validFailuresCount++;
         } else {
           break;
         }
       }
-      return containerRC.getMaxRetries() -
-          retryContext.getRestartTimes().size();
+      return containerRC.getMaxRetries() - validFailuresCount;
     } else {
       return retryContext.getRemainingRetries();
     }
@@ -93,13 +84,30 @@ public class SlidingWindowRetryPolicy {
   /**
    * Updates remaining retries and the restart time when
    * required in the retryContext.
+   * <p>
+   * When failuresValidityInterval is > 0, it also removes time entries from
+   * <code>restartTimes</code> which are outside the validity interval.
    */
-  private void updateRetryContext(RetryContext retryContext,
-      int pendingRetries) {
-    retryContext.setRemainingRetries(pendingRetries - 1);
-    if (retryContext.containerRetryContext.getFailuresValidityInterval()
-        > 0) {
-      retryContext.getRestartTimes().add(clock.getTime());
+  protected void updateRetryContext(RetryContext retryContext) {
+    if (retryContext.containerRetryContext.getFailuresValidityInterval() > 0) {
+      ContainerRetryContext containerRC = retryContext.containerRetryContext;
+      Iterator<Long> iterator = retryContext.getRestartTimes().iterator();
+      long currentTime = clock.getTime();
+
+      while (iterator.hasNext()) {
+        long restartTime = iterator.next();
+        if (currentTime - restartTime
+            > containerRC.getFailuresValidityInterval()) {
+          iterator.remove();
+        } else {
+          break;
+        }
+      }
+      retryContext.setRemainingRetries(containerRC.getMaxRetries() -
+          retryContext.restartTimes.size());
+      retryContext.getRestartTimes().add(currentTime);
+    } else {
+      retryContext.remainingRetries--;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/135941e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestSlidingWindowRetryPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestSlidingWindowRetryPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestSlidingWindowRetryPolicy.java
index 04889a9..bacf3bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestSlidingWindowRetryPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestSlidingWindowRetryPolicy.java
@@ -64,12 +64,18 @@ public class TestSlidingWindowRetryPolicy {
         new SlidingWindowRetryPolicy.RetryContext(retryContext);
     Assert.assertTrue("retry 1",
         retryPolicy.shouldRetry(windowRetryContext, 12));
+    retryPolicy.updateRetryContext(windowRetryContext);
+
     clock.setTime(20);
     Assert.assertTrue("retry 2",
         retryPolicy.shouldRetry(windowRetryContext, 12));
+    retryPolicy.updateRetryContext(windowRetryContext);
+
     clock.setTime(40);
     Assert.assertTrue("retry 3",
         retryPolicy.shouldRetry(windowRetryContext, 12));
+    retryPolicy.updateRetryContext(windowRetryContext);
+
     clock.setTime(45);
     Assert.assertFalse("retry failed",
         retryPolicy.shouldRetry(windowRetryContext, 12));

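The policy above counts only restarts that fall inside the failures-validity window, newest first, stopping at the first stale entry. A self-contained sketch of that counting rule, assuming timestamps are appended in time order (names here are illustrative, not the NM's API):

import java.util.ArrayList;
import java.util.List;

public class SlidingWindowSketch {
  /** Remaining retries = maxRetries minus restarts still inside the window. */
  static int remainingRetries(List<Long> restartTimes, long now,
      long validityIntervalMs, int maxRetries) {
    int validFailures = 0;
    // Walk newest-to-oldest; entries are time-ordered, so the first stale
    // timestamp means everything older is stale too.
    for (int i = restartTimes.size() - 1; i >= 0; i--) {
      if (now - restartTimes.get(i) <= validityIntervalMs) {
        validFailures++;
      } else {
        break;
      }
    }
    return maxRetries - validFailures;
  }

  public static void main(String[] args) {
    List<Long> restarts = new ArrayList<>();
    restarts.add(0L);
    restarts.add(20L);
    restarts.add(40L);
    // Window of 25 ms at t=45: only the restarts at 20 and 40 still count.
    System.out.println(remainingRetries(restarts, 45L, 25L, 3)); // prints 1
  }
}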



[30/50] [abbrv] hadoop git commit: HDDS-114. Ozone Datanode mbean registration fails for StorageLocation. Contributed by Elek, Marton.

HDDS-114. Ozone Datanode mbean registration fails for StorageLocation.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24169062
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24169062
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24169062

Branch: refs/heads/HDDS-4
Commit: 24169062e5f4e7798a47c5e6e3e94504cba73092
Parents: 30284d0
Author: Anu Engineer <ae...@apache.org>
Authored: Tue May 29 13:23:58 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue May 29 13:48:55 2018 -0700

----------------------------------------------------------------------
 .../common/impl/StorageLocationReport.java      | 52 +++++++++++---------
 .../ContainerLocationManagerMXBean.java         |  4 +-
 .../interfaces/StorageLocationReportMXBean.java | 40 +++++++++++++++
 3 files changed, 71 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24169062/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
index 87b9656..061d09b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.StorageTypeProto;
+import org.apache.hadoop.ozone.container.common.interfaces
+    .StorageLocationReportMXBean;
 
 import java.io.IOException;
 
@@ -30,7 +32,8 @@ import java.io.IOException;
  * Storage location stats of datanodes that provide back store for containers.
  *
  */
-public class StorageLocationReport {
+public final class StorageLocationReport implements
+    StorageLocationReportMXBean {
 
   private final String id;
   private final boolean failed;
@@ -76,6 +79,11 @@ public class StorageLocationReport {
     return storageLocation;
   }
 
+  @Override
+  public String getStorageTypeName() {
+    return storageType.name();
+  }
+
   public StorageType getStorageType() {
     return storageType;
   }
@@ -204,76 +212,76 @@ public class StorageLocationReport {
     /**
      * Sets the storageId.
      *
-     * @param id storageId
+     * @param idValue storageId
      * @return StorageLocationReport.Builder
      */
-    public Builder setId(String id) {
-      this.id = id;
+    public Builder setId(String idValue) {
+      this.id = idValue;
       return this;
     }
 
     /**
      * Sets whether the volume failed or not.
      *
-     * @param failed whether volume failed or not
+     * @param failedValue whether volume failed or not
      * @return StorageLocationReport.Builder
      */
-    public Builder setFailed(boolean failed) {
-      this.failed = failed;
+    public Builder setFailed(boolean failedValue) {
+      this.failed = failedValue;
       return this;
     }
 
     /**
      * Sets the capacity of volume.
      *
-     * @param capacity capacity
+     * @param capacityValue capacity
      * @return StorageLocationReport.Builder
      */
-    public Builder setCapacity(long capacity) {
-      this.capacity = capacity;
+    public Builder setCapacity(long capacityValue) {
+      this.capacity = capacityValue;
       return this;
     }
     /**
      * Sets the scmUsed Value.
      *
-     * @param scmUsed storage space used by scm
+     * @param scmUsedValue storage space used by scm
      * @return StorageLocationReport.Builder
      */
-    public Builder setScmUsed(long scmUsed) {
-      this.scmUsed = scmUsed;
+    public Builder setScmUsed(long scmUsedValue) {
+      this.scmUsed = scmUsedValue;
       return this;
     }
 
     /**
      * Sets the remaining free space value.
      *
-     * @param remaining remaining free space
+     * @param remainingValue remaining free space
      * @return StorageLocationReport.Builder
      */
-    public Builder setRemaining(long remaining) {
-      this.remaining = remaining;
+    public Builder setRemaining(long remainingValue) {
+      this.remaining = remainingValue;
       return this;
     }
 
     /**
      * Sets the storageType.
      *
-     * @param storageType type of the storage used
+     * @param storageTypeValue type of the storage used
      * @return StorageLocationReport.Builder
      */
-    public Builder setStorageType(StorageType storageType) {
-      this.storageType = storageType;
+    public Builder setStorageType(StorageType storageTypeValue) {
+      this.storageType = storageTypeValue;
       return this;
     }
 
     /**
      * Sets the storageLocation.
      *
-     * @param storageLocation location of the volume
+     * @param storageLocationValue location of the volume
      * @return StorageLocationReport.Builder
      */
-    public Builder setStorageLocation(String storageLocation) {
-      this.storageLocation = storageLocation;
+    public Builder setStorageLocation(String storageLocationValue) {
+      this.storageLocation = storageLocationValue;
       return this;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24169062/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java
index 88e6148..97d2dc3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.ozone.container.common.interfaces;
 
-import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
-
 import java.io.IOException;
 
 /**
@@ -31,6 +29,6 @@ public interface ContainerLocationManagerMXBean {
    *
    * @return storage location usage report.
    */
-  StorageLocationReport[] getLocationReport() throws IOException;
+  StorageLocationReportMXBean[] getLocationReport() throws IOException;
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24169062/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java
new file mode 100644
index 0000000..fd06367
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.interfaces;
+
+/**
+ * Contract to define properties available on the JMX interface.
+ */
+public interface StorageLocationReportMXBean {
+
+  String getId();
+
+  boolean isFailed();
+
+  long getCapacity();
+
+  long getScmUsed();
+
+  long getRemaining();
+
+  String getStorageLocation();
+
+  String getStorageTypeName();
+
+}
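
One reason the getter-only interface above unblocks registration: JMX restricts MXBean attribute types to values it can map onto open types, so exposing the concrete report class (with its StorageType field) directly can fail, while an interface of String/boolean/long getters, with the enum flattened to its name via getStorageTypeName(), maps cleanly. A minimal, self-contained sketch of the pattern using only standard java.lang.management APIs (the Volume* names are hypothetical, not the Ozone classes):

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Hypothetical sketch: surface a report over JMX through a getter-only
// interface whose attribute types are all open-type friendly, mirroring
// StorageLocationReportMXBean above.
public class MXBeanSketch {

  public interface VolumeReportMXBean {
    String getId();
    boolean isFailed();
    long getCapacity();
    String getStorageTypeName();   // enum exposed as a plain String
  }

  public static class VolumeReport implements VolumeReportMXBean {
    @Override public String getId() { return "volume-1"; }
    @Override public boolean isFailed() { return false; }
    @Override public long getCapacity() { return 1L << 30; }
    @Override public String getStorageTypeName() { return "DISK"; }
  }

  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    ObjectName name = new ObjectName("sketch:type=VolumeReport,id=volume-1");
    // Registration succeeds because every attribute maps to an open type.
    server.registerMBean(new VolumeReport(), name);
    System.out.println("registered, Id=" + server.getAttribute(name, "Id"));
  }
}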




[37/50] [abbrv] hadoop git commit: HDFS-13629. Some tests in TestDiskBalancerCommand fail on Windows due to MiniDFSCluster path conflict and improper path usage. Contributed by Anbang Hu.

HDFS-13629. Some tests in TestDiskBalancerCommand fail on Windows due to MiniDFSCluster path conflict and improper path usage. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47c31ff1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47c31ff1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47c31ff1

Branch: refs/heads/HDDS-4
Commit: 47c31ff16b452d47afc6ffc1cf936ac2de9b788d
Parents: 8197b9b
Author: Inigo Goiri <in...@apache.org>
Authored: Wed May 30 10:22:04 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Wed May 30 10:22:04 2018 -0700

----------------------------------------------------------------------
 .../server/diskbalancer/DiskBalancerTestUtil.java    |  5 ++++-
 .../command/TestDiskBalancerCommand.java             | 15 +++++++--------
 2 files changed, 11 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47c31ff1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
index bd8dbce..fef9c63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 
 import org.slf4j.Logger;
@@ -46,6 +47,7 @@ import org.slf4j.LoggerFactory;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.File;
 import java.io.IOException;
 import java.util.Random;
 import java.util.UUID;
@@ -307,7 +309,8 @@ public class DiskBalancerTestUtil {
         "need to specify capacities for two storages.");
 
     // Write a file and restart the cluster
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+    File basedir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, basedir)
         .numDataNodes(numDatanodes)
         .storageCapacities(storageCapacities)
         .storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47c31ff1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index 8266c1f..dee2a90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -615,15 +615,15 @@ public class TestDiskBalancerCommand {
     assertThat(
         outputs.get(3),
         is(allOf(containsString("DISK"),
-            containsString(cluster.getInstanceStorageDir(0, 0)
-                .getAbsolutePath()),
+            containsString(new Path(cluster.getInstanceStorageDir(0, 0)
+                .getAbsolutePath()).toString()),
             containsString("0.00"),
             containsString("1.00"))));
     assertThat(
         outputs.get(4),
         is(allOf(containsString("DISK"),
-            containsString(cluster.getInstanceStorageDir(0, 1)
-                .getAbsolutePath()),
+            containsString(new Path(cluster.getInstanceStorageDir(0, 1)
+                .getAbsolutePath()).toString()),
             containsString("0.00"),
             containsString("1.00"))));
   }
@@ -719,9 +719,7 @@ public class TestDiskBalancerCommand {
   @Test
   public void testPrintFullPathOfPlan()
       throws Exception {
-    final Path parent = new Path(
-        PathUtils.getTestPath(getClass()),
-        GenericTestUtils.getMethodName());
+    String parent = GenericTestUtils.getRandomizedTempPath();
 
     MiniDFSCluster miniCluster = null;
     try {
@@ -815,7 +813,8 @@ public class TestDiskBalancerCommand {
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
     final int numDatanodes = 2;
-    MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf)
+    File basedir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf, basedir)
         .numDataNodes(numDatanodes).build();
     try {
       miniDFSCluster.waitActive();
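
Both halves of this patch target Windows specifics visible in the hunks above: the assertions wrap getAbsolutePath() in new Path(...).toString() so Windows backslashes are normalized to the separator the balancer output uses, and each MiniDFSCluster gets its own randomized base directory so re-runs cannot collide on the default path. A minimal sketch of the randomized-basedir pattern, using only calls that appear in the diff:

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.GenericTestUtils;

public class RandomizedBasedirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Unique directory per run: no conflict with a previous (possibly
    // still-locked) cluster directory on Windows.
    File basedir = new File(GenericTestUtils.getRandomizedTempPath());
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, basedir)
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}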




[26/50] [abbrv] hadoop git commit: HDDS-81. Moving ContainerReport inside Datanode heartbeat. Contributed by Nanda Kumar.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
deleted file mode 100644
index 50fd18f..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.testutils;
-
-import com.google.common.primitives.Longs;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Random;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .HEALTHY;
-
-/**
- * This class  manages the state of datanode
- * in conjunction with the node pool and node managers.
- */
-public class ReplicationDatanodeStateManager {
-  private final NodeManager nodeManager;
-  private final NodePoolManager poolManager;
-  private final Random r;
-
-  /**
-   * The datanode state Manager.
-   *
-   * @param nodeManager
-   * @param poolManager
-   */
-  public ReplicationDatanodeStateManager(NodeManager nodeManager,
-      NodePoolManager poolManager) {
-    this.nodeManager = nodeManager;
-    this.poolManager = poolManager;
-    r = new Random();
-  }
-
-  /**
-   * Get Container Report as if it is from a datanode in the cluster.
-   * @param containerID - Container ID.
-   * @param poolName - Pool Name.
-   * @param dataNodeCount - Datanode Count.
-   * @return List of Container Reports.
-   */
-  public List<ContainerReportsRequestProto> getContainerReport(
-      long containerID, String poolName, int dataNodeCount) {
-    List<ContainerReportsRequestProto> containerList = new LinkedList<>();
-    List<DatanodeDetails> nodesInPool = poolManager.getNodes(poolName);
-
-    if (nodesInPool == null) {
-      return containerList;
-    }
-
-    if (nodesInPool.size() < dataNodeCount) {
-      throw new IllegalStateException("Not enough datanodes to create " +
-          "required container reports");
-    }
-
-    while (containerList.size() < dataNodeCount && nodesInPool.size() > 0) {
-      DatanodeDetails id = nodesInPool.get(r.nextInt(nodesInPool.size()));
-      nodesInPool.remove(id);
-      containerID++;
-      // We return container reports only for nodes that are healthy.
-      if (nodeManager.getNodeState(id) == HEALTHY) {
-        ContainerInfo info = ContainerInfo.newBuilder()
-            .setContainerID(containerID)
-            .setFinalhash(DigestUtils.sha256Hex(
-                Longs.toByteArray(containerID)))
-            .setContainerID(containerID)
-            .build();
-        ContainerReportsRequestProto containerReport =
-            ContainerReportsRequestProto.newBuilder().addReports(info)
-            .setDatanodeDetails(id.getProtoBufMessage())
-            .setType(ContainerReportsRequestProto.reportType.fullReport)
-            .build();
-        containerList.add(containerReport);
-      }
-    }
-    return containerList;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index 3f814d0..072d821 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -24,13 +24,13 @@ import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodePoolManager;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
 import org.apache.hadoop.ozone.protocol.VersionResponse;
+import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.mockito.Mockito;
 
@@ -277,12 +277,12 @@ public class ReplicationNodeManagerMock implements NodeManager {
    * Register the node if the node finds that it is not registered with any SCM.
    *
    * @param dd DatanodeDetailsProto
-   * @param nodeReport SCMNodeReport
+   * @param nodeReport NodeReportProto
    * @return SCMHeartbeatResponseProto
    */
   @Override
-  public SCMCommand register(HddsProtos.DatanodeDetailsProto dd,
-                             SCMNodeReport nodeReport) {
+  public RegisteredCommand register(DatanodeDetails dd,
+                                    NodeReportProto nodeReport) {
     return null;
   }
 
@@ -294,8 +294,8 @@ public class ReplicationNodeManagerMock implements NodeManager {
    * @return SCMheartbeat response list
    */
   @Override
-  public List<SCMCommand> sendHeartbeat(HddsProtos.DatanodeDetailsProto dd,
-      SCMNodeReport nodeReport) {
+  public List<SCMCommand> sendHeartbeat(DatanodeDetails dd,
+      NodeReportProto nodeReport) {
     return null;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index a0d41a8..0c1d8f2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -32,8 +32,10 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager.StartupOption;
 import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;
@@ -302,12 +304,11 @@ public class TestStorageContainerManager {
       NodeManager nodeManager = cluster.getStorageContainerManager()
           .getScmNodeManager();
       List<SCMCommand> commands = nodeManager.sendHeartbeat(
-          nodeManager.getNodes(NodeState.HEALTHY).get(0).getProtoBufMessage(),
-          null);
+          nodeManager.getNodes(NodeState.HEALTHY).get(0), null);
 
       if (commands != null) {
         for (SCMCommand cmd : commands) {
-          if (cmd.getType() == SCMCmdType.deleteBlocksCommand) {
+          if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) {
             List<DeletedBlocksTransaction> deletedTXs =
                 ((DeleteBlocksCommand) cmd).blocksTobeDeleted();
             return deletedTXs != null && deletedTXs.size() == limitSize;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
index 1d19bb3..1dbe760 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
@@ -32,8 +32,10 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerReport;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
@@ -75,11 +77,11 @@ public class TestSCMMetrics {
       ContainerStat stat = new ContainerStat(size, used, keyCount, readBytes,
           writeBytes, readCount, writeCount);
       StorageContainerManager scmManager = cluster.getStorageContainerManager();
-
-      ContainerReportsRequestProto request = createContainerReport(numReport,
-          stat, null);
-      String fstDatanodeUuid = request.getDatanodeDetails().getUuid();
-      scmManager.getDatanodeProtocolServer().sendContainerReport(request);
+      DatanodeDetails fstDatanodeDetails = TestUtils.getDatanodeDetails();
+      ContainerReportsProto request = createContainerReport(numReport, stat);
+      String fstDatanodeUuid = fstDatanodeDetails.getUuidString();
+      scmManager.getDatanodeProtocolServer().processContainerReports(
+          fstDatanodeDetails, request);
 
       // verify container stat metrics
       MetricsRecordBuilder scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
@@ -100,9 +102,11 @@ public class TestSCMMetrics {
           getLongGauge("LastContainerReportWriteCount", scmMetrics));
 
       // add one new report
-      request = createContainerReport(1, stat, null);
-      String sndDatanodeUuid = request.getDatanodeDetails().getUuid();
-      scmManager.getDatanodeProtocolServer().sendContainerReport(request);
+      DatanodeDetails sndDatanodeDetails = TestUtils.getDatanodeDetails();
+      request = createContainerReport(1, stat);
+      String sndDatanodeUuid = sndDatanodeDetails.getUuidString();
+      scmManager.getDatanodeProtocolServer().processContainerReports(
+          sndDatanodeDetails, request);
 
       scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
       assertEquals(size * (numReport + 1),
@@ -124,12 +128,12 @@ public class TestSCMMetrics {
       // Re-send reports but with different value for validating
       // the aggregation.
       stat = new ContainerStat(100, 50, 3, 50, 60, 5, 6);
-      scmManager.getDatanodeProtocolServer().sendContainerReport(
-          createContainerReport(1, stat, fstDatanodeUuid));
+      scmManager.getDatanodeProtocolServer().processContainerReports(
+          fstDatanodeDetails, createContainerReport(1, stat));
 
       stat = new ContainerStat(1, 1, 1, 1, 1, 1, 1);
-      scmManager.getDatanodeProtocolServer().sendContainerReport(
-          createContainerReport(1, stat, sndDatanodeUuid));
+      scmManager.getDatanodeProtocolServer().processContainerReports(
+          sndDatanodeDetails, createContainerReport(1, stat));
 
       // the global container metrics value should be updated
       scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
@@ -170,11 +174,11 @@ public class TestSCMMetrics {
           writeBytes, readCount, writeCount);
       StorageContainerManager scmManager = cluster.getStorageContainerManager();
 
-      String datanodeUuid = cluster.getHddsDatanodes().get(0)
-          .getDatanodeDetails().getUuidString();
-      ContainerReportsRequestProto request = createContainerReport(numReport,
-          stat, datanodeUuid);
-      scmManager.getDatanodeProtocolServer().sendContainerReport(request);
+      DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
+          .getDatanodeDetails();
+      ContainerReportsProto request = createContainerReport(numReport, stat);
+      scmManager.getDatanodeProtocolServer().processContainerReports(
+          datanodeDetails, request);
 
       MetricsRecordBuilder scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
       assertEquals(size * numReport,
@@ -216,11 +220,11 @@ public class TestSCMMetrics {
     }
   }
 
-  private ContainerReportsRequestProto createContainerReport(int numReport,
-      ContainerStat stat, String datanodeUuid) {
-    StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto.Builder
+  private ContainerReportsProto createContainerReport(int numReport,
+      ContainerStat stat) {
+    StorageContainerDatanodeProtocolProtos.ContainerReportsProto.Builder
         reportsBuilder = StorageContainerDatanodeProtocolProtos
-        .ContainerReportsRequestProto.newBuilder();
+        .ContainerReportsProto.newBuilder();
 
     for (int i = 0; i < numReport; i++) {
       ContainerReport report = new ContainerReport(
@@ -234,24 +238,6 @@ public class TestSCMMetrics {
       report.setWriteBytes(stat.getWriteBytes().get());
       reportsBuilder.addReports(report.getProtoBufMessage());
     }
-
-    DatanodeDetails datanodeDetails;
-    if (datanodeUuid == null) {
-      datanodeDetails = TestUtils.getDatanodeDetails();
-    } else {
-      datanodeDetails = DatanodeDetails.newBuilder()
-          .setUuid(datanodeUuid)
-          .setIpAddress("127.0.0.1")
-          .setHostName("localhost")
-          .setContainerPort(0)
-          .setRatisPort(0)
-          .setOzoneRestPort(0)
-          .build();
-    }
-
-    reportsBuilder.setDatanodeDetails(datanodeDetails.getProtoBufMessage());
-    reportsBuilder.setType(StorageContainerDatanodeProtocolProtos
-        .ContainerReportsRequestProto.reportType.fullReport);
     return reportsBuilder.build();
   }
 }
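
The mechanical test changes above all follow from one protocol decision: after HDDS-81 a report no longer embeds its sender, so callers pass DatanodeDetails alongside ContainerReportsProto (processContainerReports(fstDatanodeDetails, request)) instead of baking a UUID and a reportType into the request. A hypothetical sketch of that shape, with illustrative names rather than the Ozone classes:

import java.util.Arrays;
import java.util.List;

public class HeartbeatShapeSketch {

  /** Illustrative stand-in for the server-side protocol endpoint. */
  interface DatanodeProtocolServer {
    // Sender identity travels beside the payload, not inside it.
    void processContainerReports(String datanodeUuid, List<Long> containerIds);
  }

  public static void main(String[] args) {
    DatanodeProtocolServer server = (uuid, ids) ->
        System.out.println(uuid + " reported " + ids.size() + " containers");
    // No setDatanodeDetails(...) / setType(...) on the report itself.
    server.processContainerReports("dn-1", Arrays.asList(1L, 2L, 3L));
  }
}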




[28/50] [abbrv] hadoop git commit: HDDS-81. Moving ContainerReport inside Datanode heartbeat. Contributed by Nanda Kumar.

HDDS-81. Moving ContainerReport inside Datanode heartbeat.
Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/201440b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/201440b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/201440b9

Branch: refs/heads/HDDS-4
Commit: 201440b987d5ef3910c2045b2411c213ed6eec1f
Parents: 4827e9a
Author: Anu Engineer <ae...@apache.org>
Authored: Tue May 29 12:40:27 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue May 29 12:48:50 2018 -0700

----------------------------------------------------------------------
 .../common/impl/ContainerManagerImpl.java       |  22 +-
 .../common/impl/StorageLocationReport.java      |   8 +-
 .../common/interfaces/ContainerManager.java     |   8 +-
 .../statemachine/DatanodeStateMachine.java      |   7 +-
 .../common/statemachine/StateContext.java       |  16 +-
 .../CloseContainerCommandHandler.java           | 113 ++++++++
 .../commandhandler/CloseContainerHandler.java   | 113 --------
 .../commandhandler/CommandDispatcher.java       |   5 +-
 .../commandhandler/CommandHandler.java          |   8 +-
 .../DeleteBlocksCommandHandler.java             |  12 +-
 .../states/endpoint/HeartbeatEndpointTask.java  |  30 +-
 .../states/endpoint/RegisterEndpointTask.java   |  12 +-
 .../container/ozoneimpl/OzoneContainer.java     |  10 +-
 .../StorageContainerDatanodeProtocol.java       |  30 +-
 .../protocol/StorageContainerNodeProtocol.java  |  15 +-
 .../commands/CloseContainerCommand.java         |  18 +-
 .../protocol/commands/DeleteBlocksCommand.java  |  18 +-
 .../protocol/commands/RegisteredCommand.java    |  26 +-
 .../protocol/commands/ReregisterCommand.java    |  16 +-
 .../ozone/protocol/commands/SCMCommand.java     |   4 +-
 ...rDatanodeProtocolClientSideTranslatorPB.java |  50 +---
 ...rDatanodeProtocolServerSideTranslatorPB.java |  53 ++--
 .../StorageContainerDatanodeProtocol.proto      | 256 ++++++++---------
 .../ozone/container/common/ScmTestMock.java     |  78 ++----
 .../hdds/scm/container/ContainerMapping.java    |  10 +-
 .../hadoop/hdds/scm/container/Mapping.java      |   6 +-
 .../replication/ContainerSupervisor.java        |  13 +-
 .../container/replication/InProgressPool.java   |  15 +-
 .../hdds/scm/node/HeartbeatQueueItem.java       |  14 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java    |  58 ++--
 .../hdds/scm/node/SCMNodeStorageStatMap.java    |  14 +-
 .../scm/server/SCMDatanodeProtocolServer.java   | 195 +++++++------
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  19 +-
 .../hdds/scm/container/MockNodeManager.java     |  26 +-
 .../scm/container/TestContainerMapping.java     |  24 +-
 .../container/closer/TestContainerCloser.java   |  12 +-
 .../hdds/scm/node/TestContainerPlacement.java   |   6 +-
 .../hadoop/hdds/scm/node/TestNodeManager.java   |  83 +++---
 .../scm/node/TestSCMNodeStorageStatMap.java     |  16 +-
 .../ozone/container/common/TestEndPoint.java    | 113 ++------
 .../replication/TestContainerSupervisor.java    | 275 -------------------
 .../ReplicationDatanodeStateManager.java        | 101 -------
 .../testutils/ReplicationNodeManagerMock.java   |  14 +-
 .../ozone/TestStorageContainerManager.java      |  11 +-
 .../apache/hadoop/ozone/scm/TestSCMMetrics.java |  68 ++---
 45 files changed, 706 insertions(+), 1315 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 9355364..af47015 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -35,11 +35,11 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -854,11 +854,11 @@ public class ContainerManagerImpl implements ContainerManager {
    * @return node report.
    */
   @Override
-  public SCMNodeReport getNodeReport() throws IOException {
+  public NodeReportProto getNodeReport() throws IOException {
     StorageLocationReport[] reports = locationManager.getLocationReport();
-    SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
+    NodeReportProto.Builder nrb = NodeReportProto.newBuilder();
     for (int i = 0; i < reports.length; i++) {
-      SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
+      StorageReportProto.Builder srb = StorageReportProto.newBuilder();
       nrb.addStorageReport(reports[i].getProtoBufMessage());
     }
     return nrb.build();
@@ -891,7 +891,7 @@ public class ContainerManagerImpl implements ContainerManager {
    * @throws IOException
    */
   @Override
-  public ContainerReportsRequestProto getContainerReport() throws IOException {
+  public ContainerReportsProto getContainerReport() throws IOException {
     LOG.debug("Starting container report iteration.");
     // No need for locking since containerMap is a ConcurrentSkipListMap
     // And we can never get the exact state since close might happen
@@ -899,12 +899,8 @@ public class ContainerManagerImpl implements ContainerManager {
     List<ContainerData> containers = containerMap.values().stream()
         .collect(Collectors.toList());
 
-    ContainerReportsRequestProto.Builder crBuilder =
-        ContainerReportsRequestProto.newBuilder();
-
-    // TODO: support delta based container report
-    crBuilder.setDatanodeDetails(datanodeDetails.getProtoBufMessage())
-        .setType(ContainerReportsRequestProto.reportType.fullReport);
+    ContainerReportsProto.Builder crBuilder =
+        ContainerReportsProto.newBuilder();
 
     for (ContainerData container: containers) {
       long containerId = container.getContainerID();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
index a5ad6c2..87b9656 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.container.common.impl;
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+    StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 
@@ -137,8 +137,8 @@ public class StorageLocationReport {
    * @return SCMStorageReport
    * @throws IOException In case, the storage type specified is invalid.
    */
-  public SCMStorageReport getProtoBufMessage() throws IOException{
-    SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
+  public StorageReportProto getProtoBufMessage() throws IOException{
+    StorageReportProto.Builder srb = StorageReportProto.newBuilder();
     return srb.setStorageUuid(getId())
         .setCapacity(getCapacity())
         .setScmUsed(getScmUsed())
@@ -156,7 +156,7 @@ public class StorageLocationReport {
    * @throws IOException in case of invalid storage type
    */
 
-  public static StorageLocationReport getFromProtobuf(SCMStorageReport report)
+  public static StorageLocationReport getFromProtobuf(StorageReportProto report)
       throws IOException {
     StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
     builder.setId(report.getStorageUuid())
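
For orientation amid the renames: SCMNodeReport and SCMStorageReport become NodeReportProto and StorageReportProto, and the container report drops its Request suffix along with the embedded sender. A sketch of assembling a node report against the renamed messages, using only the builder calls visible in these hunks (setStorageUuid, setCapacity, setScmUsed, addStorageReport); a real report sets further fields, and whether any other fields are required is an assumption left open here:

import org.apache.hadoop.hdds.protocol.proto
    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto
    .StorageContainerDatanodeProtocolProtos.StorageReportProto;

public class NodeReportSketch {
  public static NodeReportProto buildNodeReport() {
    StorageReportProto storage = StorageReportProto.newBuilder()
        .setStorageUuid("volume-uuid-1")          // illustrative value
        .setCapacity(1L << 40)
        .setScmUsed(1L << 30)
        .build();
    return NodeReportProto.newBuilder()
        .addStorageReport(storage)                // one entry per volume
        .build();
  }

  public static void main(String[] args) {
    System.out.println(buildNodeReport());
  }
}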

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
index ba70953..49b68dc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
@@ -27,9 +27,9 @@ import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.util.RwLock;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 
 import java.io.IOException;
@@ -171,14 +171,14 @@ public interface ContainerManager extends RwLock {
    * Get the Node Report of container storage usage.
    * @return node report.
    */
-  SCMNodeReport getNodeReport() throws IOException;
+  NodeReportProto getNodeReport() throws IOException;
 
   /**
    * Gets container report.
    * @return container report.
    * @throws IOException
    */
-  ContainerReportsRequestProto getContainerReport() throws IOException;
+  ContainerReportsProto getContainerReport() throws IOException;
 
   /**
    * Gets container reports.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index a8fe494..d0a4217 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -21,8 +21,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-    .CloseContainerHandler;
+import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CloseContainerCommandHandler;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
     .CommandDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
@@ -86,7 +85,7 @@ public class DatanodeStateMachine implements Closeable {
      // When we add new handlers just adding a new handler here should do the
      // trick.
     commandDispatcher = CommandDispatcher.newBuilder()
-        .addHandler(new CloseContainerHandler())
+        .addHandler(new CloseContainerCommandHandler())
         .addHandler(new DeleteBlocksCommandHandler(
             container.getContainerManager(), conf))
         .setConnectionManager(connectionManager)
@@ -131,7 +130,7 @@ public class DatanodeStateMachine implements Closeable {
       try {
         LOG.debug("Executing cycle Number : {}", context.getExecutionCount());
         nextHB.set(Time.monotonicNow() + heartbeatFrequency);
-        context.setReportState(container.getNodeReport());
+        context.setNodeReport(container.getNodeReport());
         context.execute(executorService, heartbeatFrequency,
             TimeUnit.MILLISECONDS);
         now = Time.monotonicNow();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 27eb57e..4e3c610 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.container.common.statemachine;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.ozone.container.common.states.DatanodeState;
 import org.apache.hadoop.ozone.container.common.states.datanode
     .InitDatanodeState;
@@ -52,7 +52,7 @@ public class StateContext {
   private final AtomicLong stateExecutionCount;
   private final Configuration conf;
   private DatanodeStateMachine.DatanodeStates state;
-  private SCMNodeReport nrState;
+  private NodeReportProto dnReport;
 
   /**
    * Constructs a StateContext.
@@ -69,7 +69,7 @@ public class StateContext {
     commandQueue = new LinkedList<>();
     lock = new ReentrantLock();
     stateExecutionCount = new AtomicLong(0);
-    nrState = SCMNodeReport.getDefaultInstance();
+    dnReport = NodeReportProto.getDefaultInstance();
   }
 
   /**
@@ -144,16 +144,16 @@ public class StateContext {
    * Returns the node report of the datanode state context.
    * @return the node report.
    */
-  public SCMNodeReport getNodeReport() {
-    return nrState;
+  public NodeReportProto getNodeReport() {
+    return dnReport;
   }
 
   /**
    * Sets the storage location report of the datanode state context.
-   * @param nrReport - node report
+   * @param nodeReport node report
    */
-  public void setReportState(SCMNodeReport nrReport) {
-    this.nrState = nrReport;
+  public void setNodeReport(NodeReportProto nodeReport) {
+    this.dnReport = nodeReport;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
new file mode 100644
index 0000000..e8c602d
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
+
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
+import org.apache.hadoop.ozone.container.common.statemachine
+    .SCMConnectionManager;
+import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Handler for close container command received from SCM.
+ */
+public class CloseContainerCommandHandler implements CommandHandler {
+  static final Logger LOG =
+      LoggerFactory.getLogger(CloseContainerCommandHandler.class);
+  private int invocationCount;
+  private long totalTime;
+
+  /**
+   * Constructs a ContainerReport handler.
+   */
+  public CloseContainerCommandHandler() {
+  }
+
+  /**
+   * Handles a given SCM command.
+   *
+   * @param command           - SCM Command
+   * @param container         - Ozone Container.
+   * @param context           - Current Context.
+   * @param connectionManager - The SCMs that we are talking to.
+   */
+  @Override
+  public void handle(SCMCommand command, OzoneContainer container,
+      StateContext context, SCMConnectionManager connectionManager) {
+    LOG.debug("Processing Close Container command.");
+    invocationCount++;
+    long startTime = Time.monotonicNow();
+    // TODO: define this as INVALID_CONTAINER_ID in HddsConsts.java (TBA)
+    long containerID = -1;
+    try {
+
+      CloseContainerCommandProto
+          closeContainerProto =
+          CloseContainerCommandProto
+              .parseFrom(command.getProtoBufMessage());
+      containerID = closeContainerProto.getContainerID();
+
+      container.getContainerManager().closeContainer(containerID);
+
+    } catch (Exception e) {
+      LOG.error("Can't close container " + containerID, e);
+    } finally {
+      long endTime = Time.monotonicNow();
+      totalTime += endTime - startTime;
+    }
+  }
+
+  /**
+   * Returns the command type that this command handler handles.
+   *
+   * @return Type
+   */
+  @Override
+  public SCMCommandProto.Type getCommandType() {
+    return SCMCommandProto.Type.closeContainerCommand;
+  }
+
+  /**
+   * Returns number of times this handler has been invoked.
+   *
+   * @return int
+   */
+  @Override
+  public int getInvocationCount() {
+    return invocationCount;
+  }
+
+  /**
+   * Returns the average time this function takes to run.
+   *
+   * @return long
+   */
+  @Override
+  public long getAverageRunTime() {
+    if (invocationCount > 0) {
+      return totalTime / invocationCount;
+    }
+    return 0;
+  }
+}
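
The handler above only becomes reachable once registered, which the DatanodeStateMachine hunk earlier does via CommandDispatcher.newBuilder().addHandler(new CloseContainerCommandHandler()). A self-contained, hypothetical sketch of that dispatch pattern, keyed on a single command-type enum the way the dispatcher's Map<Type, CommandHandler> is (illustrative names, not the Ozone classes):

import java.util.EnumMap;
import java.util.Map;

public class DispatcherSketch {
  enum CommandType { closeContainerCommand, deleteBlocksCommand }

  interface Handler {
    CommandType getCommandType();
    void handle(long payload);
  }

  private final Map<CommandType, Handler> handlerMap =
      new EnumMap<>(CommandType.class);

  DispatcherSketch addHandler(Handler h) {
    // Each handler self-reports the single type it serves.
    handlerMap.put(h.getCommandType(), h);
    return this;
  }

  void dispatch(CommandType type, long payload) {
    Handler h = handlerMap.get(type);
    if (h == null) {
      throw new IllegalArgumentException("No handler for " + type);
    }
    h.handle(payload);
  }

  public static void main(String[] args) {
    DispatcherSketch dispatcher = new DispatcherSketch()
        .addHandler(new Handler() {
          @Override public CommandType getCommandType() {
            return CommandType.closeContainerCommand;
          }
          @Override public void handle(long containerId) {
            System.out.println("closing container " + containerId);
          }
        });
    dispatcher.dispatch(CommandType.closeContainerCommand, 42L);
  }
}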

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java
deleted file mode 100644
index d8adc7d..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCloseContainerCmdResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCmdType;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .SCMConnectionManager;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Container Report handler.
- */
-public class CloseContainerHandler implements CommandHandler {
-  static final Logger LOG =
-      LoggerFactory.getLogger(CloseContainerHandler.class);
-  private int invocationCount;
-  private long totalTime;
-
-  /**
-   * Constructs a ContainerReport handler.
-   */
-  public CloseContainerHandler() {
-  }
-
-  /**
-   * Handles a given SCM command.
-   *
-   * @param command           - SCM Command
-   * @param container         - Ozone Container.
-   * @param context           - Current Context.
-   * @param connectionManager - The SCMs that we are talking to.
-   */
-  @Override
-  public void handle(SCMCommand command, OzoneContainer container,
-      StateContext context, SCMConnectionManager connectionManager) {
-    LOG.debug("Processing Close Container command.");
-    invocationCount++;
-    long startTime = Time.monotonicNow();
-    // TODO: define this as INVALID_CONTAINER_ID in HddsConsts.java (TBA)
-    long containerID = -1;
-    try {
-
-      SCMCloseContainerCmdResponseProto
-          closeContainerProto =
-          SCMCloseContainerCmdResponseProto
-              .parseFrom(command.getProtoBufMessage());
-      containerID = closeContainerProto.getContainerID();
-
-      container.getContainerManager().closeContainer(containerID);
-
-    } catch (Exception e) {
-      LOG.error("Can't close container " + containerID, e);
-    } finally {
-      long endTime = Time.monotonicNow();
-      totalTime += endTime - startTime;
-    }
-  }
-
-  /**
-   * Returns the command type that this command handler handles.
-   *
-   * @return Type
-   */
-  @Override
-  public SCMCmdType getCommandType() {
-    return SCMCmdType.closeContainerCommand;
-  }
-
-  /**
-   * Returns number of times this handler has been invoked.
-   *
-   * @return int
-   */
-  @Override
-  public int getInvocationCount() {
-    return invocationCount;
-  }
-
-  /**
-   * Returns the average time this function takes to run.
-   *
-   * @return long
-   */
-  @Override
-  public long getAverageRunTime() {
-    if (invocationCount > 0) {
-      return totalTime / invocationCount;
-    }
-    return 0;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
index 40feca3..aedd78f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
@@ -18,7 +18,8 @@
 package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
 import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
@@ -38,7 +39,7 @@ public final class CommandDispatcher {
   static final Logger LOG =
       LoggerFactory.getLogger(CommandDispatcher.class);
   private final StateContext context;
-  private final Map<SCMCmdType, CommandHandler> handlerMap;
+  private final Map<Type, CommandHandler> handlerMap;
   private final OzoneContainer container;
   private final SCMConnectionManager connectionManager;
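
With the handler map now keyed on SCMCommandProto.Type, dispatch stays a
single lookup. A hedged sketch of that path; the handleCommand method shown
here is illustrative, only the key-type change is part of this hunk:

  public void handleCommand(SCMCommand command) {
    // Resolve the handler registered for this command's new-style type.
    CommandHandler handler = handlerMap.get(command.getType());
    if (handler == null) {
      LOG.warn("No handler registered for command type {}",
          command.getType());
      return;
    }
    handler.handle(command, container, context, connectionManager);
  }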
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java
index 13d9f72..60e2dc4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java
@@ -17,8 +17,10 @@
 
 package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType;
-import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.ozone.container.common.statemachine
+    .SCMConnectionManager;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
@@ -42,7 +44,7 @@ public interface CommandHandler {
    * Returns the command type that this command handler handles.
    * @return Type
    */
-  SCMCmdType getCommandType();
+  SCMCommandProto.Type getCommandType();
 
   /**
    * Returns number of times this handler has been invoked.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index 5231660..ab69bdc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -18,6 +18,8 @@ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 
 import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
@@ -26,8 +28,6 @@ import org.apache.hadoop.hdds.protocol.proto
     .DeleteBlockTransactionResult;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCmdType;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers
@@ -73,10 +73,10 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
   @Override
   public void handle(SCMCommand command, OzoneContainer container,
       StateContext context, SCMConnectionManager connectionManager) {
-    if (command.getType() != SCMCmdType.deleteBlocksCommand) {
+    if (command.getType() != SCMCommandProto.Type.deleteBlocksCommand) {
       LOG.warn("Skipping handling command, expected command "
               + "type {} but found {}",
-          SCMCmdType.deleteBlocksCommand, command.getType());
+          SCMCommandProto.Type.deleteBlocksCommand, command.getType());
       return;
     }
     LOG.debug("Processing block deletion command.");
@@ -193,8 +193,8 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
   }
 
   @Override
-  public SCMCmdType getCommandType() {
-    return SCMCmdType.deleteBlocksCommand;
+  public SCMCommandProto.Type getCommandType() {
+    return SCMCommandProto.Type.deleteBlocksCommand;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index 01b4c72..337cdfb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -23,7 +23,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto;
+    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
 import org.apache.hadoop.ozone.container.common.helpers
@@ -97,8 +99,13 @@ public class HeartbeatEndpointTask
     try {
       Preconditions.checkState(this.datanodeDetailsProto != null);
 
+      SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
+          .setDatanodeDetails(datanodeDetailsProto)
+          .setNodeReport(context.getNodeReport())
+          .build();
+
       SCMHeartbeatResponseProto reponse = rpcEndpoint.getEndPoint()
-          .sendHeartbeat(datanodeDetailsProto, this.context.getNodeReport());
+          .sendHeartbeat(request);
       processResponse(reponse, datanodeDetailsProto);
       rpcEndpoint.setLastSuccessfulHeartbeat(ZonedDateTime.now());
       rpcEndpoint.zeroMissedCount();
@@ -125,13 +132,13 @@ public class HeartbeatEndpointTask
    */
   private void processResponse(SCMHeartbeatResponseProto response,
       final DatanodeDetailsProto datanodeDetails) {
-    for (SCMCommandResponseProto commandResponseProto : response
+    // Verify the response is indeed for this datanode.
+    Preconditions.checkState(response.getDatanodeUUID()
+            .equalsIgnoreCase(datanodeDetails.getUuid()),
+        "Unexpected datanode ID in the response.");
+    for (SCMCommandProto commandResponseProto : response
         .getCommandsList()) {
-      // Verify the response is indeed for this datanode.
-      Preconditions.checkState(commandResponseProto.getDatanodeUUID()
-          .equalsIgnoreCase(datanodeDetails.getUuid()),
-          "Unexpected datanode ID in the response.");
-      switch (commandResponseProto.getCmdType()) {
+      switch (commandResponseProto.getCommandType()) {
       case reregisterCommand:
         if (rpcEndpoint.getState() == EndPointStates.HEARTBEAT) {
           if (LOG.isDebugEnabled()) {
@@ -148,7 +155,8 @@ public class HeartbeatEndpointTask
         break;
       case deleteBlocksCommand:
         DeleteBlocksCommand db = DeleteBlocksCommand
-            .getFromProtobuf(commandResponseProto.getDeleteBlocksProto());
+            .getFromProtobuf(
+                commandResponseProto.getDeleteBlocksCommandProto());
         if (!db.blocksTobeDeleted().isEmpty()) {
           if (LOG.isDebugEnabled()) {
             LOG.debug(DeletedContainerBlocksSummary
@@ -161,7 +169,7 @@ public class HeartbeatEndpointTask
       case closeContainerCommand:
         CloseContainerCommand closeContainer =
             CloseContainerCommand.getFromProtobuf(
-                commandResponseProto.getCloseContainerProto());
+                commandResponseProto.getCloseContainerCommandProto());
         if (LOG.isDebugEnabled()) {
           LOG.debug("Received SCM container close request for container {}",
               closeContainer.getContainerID());
@@ -170,7 +178,7 @@ public class HeartbeatEndpointTask
         break;
       default:
         throw new IllegalArgumentException("Unknown response : "
-            + commandResponseProto.getCmdType().name());
+            + commandResponseProto.getCommandType().name());
       }
     }
   }
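
Net effect of this hunk: the datanode UUID moves from each command onto the
response envelope, so it is validated once, and each command payload is read
through its renamed accessor. A condensed sketch of the resulting loop, using
only types and getters visible in this diff (the queueing call is
illustrative):

  // Validate the envelope once instead of per command.
  Preconditions.checkState(response.getDatanodeUUID()
      .equalsIgnoreCase(datanodeDetails.getUuid()),
      "Unexpected datanode ID in the response.");
  for (SCMCommandProto cmd : response.getCommandsList()) {
    switch (cmd.getCommandType()) {
    case deleteBlocksCommand:
      DeleteBlocksCommand db = DeleteBlocksCommand
          .getFromProtobuf(cmd.getDeleteBlocksCommandProto());
      this.context.addCommand(db);
      break;
    case closeContainerCommand:
      this.context.addCommand(CloseContainerCommand
          .getFromProtobuf(cmd.getCloseContainerCommandProto()));
      break;
    default:
      throw new IllegalArgumentException("Unknown response : "
          + cmd.getCommandType().name());
    }
  }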

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index 77a7084..12b48ab 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -24,11 +24,11 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.container.common.statemachine
     .EndpointStateMachine;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
+    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -104,11 +104,11 @@ public final class RegisterEndpointTask implements
     rpcEndPoint.lock();
     try {
 
-      ContainerReportsRequestProto contianerReport = datanodeContainerManager
+      ContainerReportsProto contianerReport = datanodeContainerManager
           .getContainerReport();
-      SCMNodeReport nodeReport = datanodeContainerManager.getNodeReport();
+      NodeReportProto nodeReport = datanodeContainerManager.getNodeReport();
       // TODO : Add responses to the command Queue.
-      SCMRegisteredCmdResponseProto response = rpcEndPoint.getEndPoint()
+      SCMRegisteredResponseProto response = rpcEndPoint.getEndPoint()
           .register(datanodeDetails.getProtoBufMessage(), nodeReport,
               contianerReport);
       Preconditions.checkState(UUID.fromString(response.getDatanodeUUID())

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 6758479..b357fef 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -19,14 +19,14 @@ package org.apache.hadoop.ozone.container.ozoneimpl;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl;
@@ -219,7 +219,7 @@ public class OzoneContainer {
   /**
    * Returns node report of container storage usage.
    */
-  public SCMNodeReport getNodeReport() throws IOException {
+  public NodeReportProto getNodeReport() throws IOException {
     return this.manager.getNodeReport();
   }
 
@@ -255,7 +255,7 @@ public class OzoneContainer {
    * @return - container report.
    * @throws IOException
    */
-  public ContainerReportsRequestProto getContainerReport() throws IOException {
+  public ContainerReportsProto getContainerReport() throws IOException {
     return this.manager.getContainerReport();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
index e2a3bf5..a950a31 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
@@ -19,20 +19,20 @@ package org.apache.hadoop.ozone.protocol;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos
     .ContainerBlocksDeletionACKResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
+    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -55,13 +55,12 @@ public interface StorageContainerDatanodeProtocol {
 
   /**
    * Used by data node to send a Heartbeat.
-   * @param datanodeDetails - Datanode Details.
-   * @param nodeReport - node report state
+   * @param heartbeat Heartbeat
    * @return - SCMHeartbeatResponseProto
    * @throws IOException
    */
-  SCMHeartbeatResponseProto sendHeartbeat(DatanodeDetailsProto datanodeDetails,
-      SCMNodeReport nodeReport) throws IOException;
+  SCMHeartbeatResponseProto sendHeartbeat(SCMHeartbeatRequestProto heartbeat)
+      throws IOException;
 
   /**
    * Register Datanode.
@@ -70,20 +69,11 @@ public interface StorageContainerDatanodeProtocol {
    * @param containerReportsRequestProto - Container Reports.
    * @return SCM Command.
    */
-  SCMRegisteredCmdResponseProto register(DatanodeDetailsProto datanodeDetails,
-      SCMNodeReport nodeReport, ContainerReportsRequestProto
+  SCMRegisteredResponseProto register(DatanodeDetailsProto datanodeDetails,
+      NodeReportProto nodeReport, ContainerReportsProto
       containerReportsRequestProto) throws IOException;
 
   /**
-   * Send a container report.
-   * @param reports -- Container report.
-   * @return container reports response.
-   * @throws IOException
-   */
-  ContainerReportsResponseProto sendContainerReport(
-      ContainerReportsRequestProto reports) throws IOException;
-
-  /**
    * Used by datanode to send block deletion ACK to SCM.
    * @param request block deletion transactions.
    * @return block deletion transaction response.
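
Note that sendContainerReport disappears from this interface entirely;
container reports now travel only inside register and heartbeat messages. A
sketch of a heartbeat that carries both reports, assuming the caller holds an
OzoneContainer (variable names are illustrative):

  SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
      .setDatanodeDetails(datanodeDetailsProto)
      // Both report fields are optional in the new proto, so either can be
      // omitted on a quiet heartbeat.
      .setNodeReport(ozoneContainer.getNodeReport())
      .setContainerReport(ozoneContainer.getContainerReport())
      .build();
  SCMHeartbeatResponseProto response = endpoint.sendHeartbeat(request);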

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
index 14038fb..790f58a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
@@ -18,11 +18,12 @@
 package org.apache.hadoop.ozone.protocol;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
+import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 
 import java.util.List;
@@ -49,11 +50,11 @@ public interface StorageContainerNodeProtocol {
   /**
    * Register the node if the node finds that it is not registered with any SCM.
    * @param datanodeDetails DatanodeDetails
-   * @param nodeReport SCMNodeReport
+   * @param nodeReport NodeReportProto
    * @return  SCMHeartbeatResponseProto
    */
-  SCMCommand register(DatanodeDetailsProto datanodeDetails, SCMNodeReport
-      nodeReport);
+  RegisteredCommand register(DatanodeDetails datanodeDetails,
+                             NodeReportProto nodeReport);
 
   /**
    * Send heartbeat to indicate the datanode is alive and doing well.
@@ -61,7 +62,7 @@ public interface StorageContainerNodeProtocol {
    * @param nodeReport - node report.
    * @return SCMheartbeat response list
    */
-  List<SCMCommand> sendHeartbeat(DatanodeDetailsProto datanodeDetails,
-      SCMNodeReport nodeReport);
+  List<SCMCommand> sendHeartbeat(DatanodeDetails datanodeDetails,
+      NodeReportProto nodeReport);
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
index d1d6488..4f4f82b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
@@ -19,18 +19,16 @@ package org.apache.hadoop.ozone.protocol.commands;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCloseContainerCmdResponseProto;
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCmdType;
+    .StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
 
-import static org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCmdType.closeContainerCommand;
 
 /**
  * Asks datanode to close a container.
  */
 public class CloseContainerCommand
-    extends SCMCommand<SCMCloseContainerCmdResponseProto> {
+    extends SCMCommand<CloseContainerCommandProto> {
 
   private long containerID;
 
@@ -44,8 +42,8 @@ public class CloseContainerCommand
    * @return Type
    */
   @Override
-  public SCMCmdType getType() {
-    return closeContainerCommand;
+  public SCMCommandProto.Type getType() {
+    return SCMCommandProto.Type.closeContainerCommand;
   }
 
   /**
@@ -58,13 +56,13 @@ public class CloseContainerCommand
     return getProto().toByteArray();
   }
 
-  public SCMCloseContainerCmdResponseProto getProto() {
-    return SCMCloseContainerCmdResponseProto.newBuilder()
+  public CloseContainerCommandProto getProto() {
+    return CloseContainerCommandProto.newBuilder()
         .setContainerID(containerID).build();
   }
 
   public static CloseContainerCommand getFromProtobuf(
-      SCMCloseContainerCmdResponseProto closeContainerProto) {
+      CloseContainerCommandProto closeContainerProto) {
     Preconditions.checkNotNull(closeContainerProto);
     return new CloseContainerCommand(closeContainerProto.getContainerID());
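
A short usage sketch of the reworked command class; the container id is
illustrative:

  // Wrap a container id and round-trip it through the renamed proto, as the
  // datanode side does when decoding a heartbeat response.
  CloseContainerCommand cmd = new CloseContainerCommand(42L);
  CloseContainerCommandProto proto = cmd.getProto();
  CloseContainerCommand decoded = CloseContainerCommand.getFromProtobuf(proto);
  assert decoded.getContainerID() == 42L;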
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java
index a11ca25..4fa33f6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java
@@ -18,11 +18,11 @@
 package org.apache.hadoop.ozone.protocol.commands;
 
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCmdType;
+    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMDeleteBlocksCmdResponseProto;
+    .StorageContainerDatanodeProtocolProtos.DeleteBlocksCommandProto;
 
 import java.util.List;
 
@@ -30,7 +30,7 @@ import java.util.List;
  * A SCM command asks a datanode to delete a number of blocks.
  */
 public class DeleteBlocksCommand extends
-    SCMCommand<SCMDeleteBlocksCmdResponseProto> {
+    SCMCommand<DeleteBlocksCommandProto> {
 
   private List<DeletedBlocksTransaction> blocksTobeDeleted;
 
@@ -44,8 +44,8 @@ public class DeleteBlocksCommand extends
   }
 
   @Override
-  public SCMCmdType getType() {
-    return SCMCmdType.deleteBlocksCommand;
+  public SCMCommandProto.Type getType() {
+    return SCMCommandProto.Type.deleteBlocksCommand;
   }
 
   @Override
@@ -54,13 +54,13 @@ public class DeleteBlocksCommand extends
   }
 
   public static DeleteBlocksCommand getFromProtobuf(
-      SCMDeleteBlocksCmdResponseProto deleteBlocksProto) {
+      DeleteBlocksCommandProto deleteBlocksProto) {
     return new DeleteBlocksCommand(deleteBlocksProto
         .getDeletedBlocksTransactionsList());
   }
 
-  public SCMDeleteBlocksCmdResponseProto getProto() {
-    return SCMDeleteBlocksCmdResponseProto.newBuilder()
+  public DeleteBlocksCommandProto getProto() {
+    return DeleteBlocksCommandProto.newBuilder()
         .addAllDeletedBlocksTransactions(blocksTobeDeleted).build();
   }
 }
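
The same round-trip applies to block deletion; a hedged sketch with one
illustrative transaction:

  // Build a single deletion transaction (all four fields are required by
  // the proto).
  DeletedBlocksTransaction tx = DeletedBlocksTransaction.newBuilder()
      .setTxID(1L)
      .setContainerID(7L)
      .addLocalID(100L)
      .setCount(0)
      .build();
  DeleteBlocksCommand cmd =
      new DeleteBlocksCommand(java.util.Collections.singletonList(tx));
  // getProto() now yields DeleteBlocksCommandProto instead of the old
  // SCMDeleteBlocksCmdResponseProto.
  DeleteBlocksCommandProto proto = cmd.getProto();
  assert DeleteBlocksCommand.getFromProtobuf(proto)
      .blocksTobeDeleted().size() == 1;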

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
index 69f2c18..3a5da72 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
@@ -19,18 +19,15 @@ package org.apache.hadoop.ozone.protocol.commands;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCmdType;
+    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto
+    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto
     .ErrorCode;
 
 /**
  * Response to Datanode Register call.
  */
-public class RegisteredCommand extends
-    SCMCommand<SCMRegisteredCmdResponseProto> {
+public class RegisteredCommand {
   private String datanodeUUID;
   private String clusterID;
   private ErrorCode error;
@@ -60,16 +57,6 @@ public class RegisteredCommand extends
   }
 
   /**
-   * Returns the type of this command.
-   *
-   * @return Type
-   */
-  @Override
-  public SCMCmdType getType() {
-    return SCMCmdType.registeredCommand;
-  }
-
-  /**
    * Returns datanode UUID.
    *
    * @return - Datanode ID.
@@ -117,10 +104,9 @@ public class RegisteredCommand extends
    *
    * @return A protobuf message.
    */
-  @Override
   public byte[] getProtoBufMessage() {
-    SCMRegisteredCmdResponseProto.Builder builder =
-        SCMRegisteredCmdResponseProto.newBuilder()
+    SCMRegisteredResponseProto.Builder builder =
+        SCMRegisteredResponseProto.newBuilder()
             .setClusterID(this.clusterID)
             .setDatanodeUUID(this.datanodeUUID)
             .setErrorCode(this.error);
@@ -157,7 +143,7 @@ public class RegisteredCommand extends
      * @param response - RegisteredCmdResponseProto
      * @return RegisteredCommand
      */
-    public  RegisteredCommand getFromProtobuf(SCMRegisteredCmdResponseProto
+    public  RegisteredCommand getFromProtobuf(SCMRegisteredResponseProto
                                                         response) {
       Preconditions.checkNotNull(response);
       if (response.hasHostname() && response.hasIpAddress()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
index c167d59..953e31a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
@@ -18,18 +18,16 @@
 package org.apache.hadoop.ozone.protocol.commands;
 
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCmdType;
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 
 import static org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCmdType.reregisterCommand;
-import static org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMReregisterCmdResponseProto;
+    .StorageContainerDatanodeProtocolProtos.ReregisterCommandProto;
 
 /**
  * Informs a datanode to register itself with SCM again.
  */
 public class ReregisterCommand extends
-    SCMCommand<SCMReregisterCmdResponseProto>{
+    SCMCommand<ReregisterCommandProto>{
 
   /**
    * Returns the type of this command.
@@ -37,8 +35,8 @@ public class ReregisterCommand extends
    * @return Type
    */
   @Override
-  public SCMCmdType getType() {
-    return reregisterCommand;
+  public SCMCommandProto.Type getType() {
+    return SCMCommandProto.Type.reregisterCommand;
   }
 
   /**
@@ -51,8 +49,8 @@ public class ReregisterCommand extends
     return getProto().toByteArray();
   }
 
-  public SCMReregisterCmdResponseProto getProto() {
-    return SCMReregisterCmdResponseProto
+  public ReregisterCommandProto getProto() {
+    return ReregisterCommandProto
         .newBuilder()
         .build();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
index 73e4194..35ca802 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.protocol.commands;
 
 import com.google.protobuf.GeneratedMessage;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCmdType;
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 
 /**
  * A class that acts as the base class to convert between Java and SCM
@@ -31,7 +31,7 @@ public abstract class SCMCommand<T extends GeneratedMessage> {
    * Returns the type of this command.
    * @return Type
    */
-  public  abstract SCMCmdType getType();
+  public  abstract SCMCommandProto.Type getType();
 
   /**
    * Gets the protobuf message of this object.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
index a56c57a..40fe189 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
@@ -20,24 +20,23 @@ import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos
     .ContainerBlocksDeletionACKResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
+
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
+    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -123,22 +122,16 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB
   /**
   * Sent by the datanode to SCM.
    *
-   * @param datanodeDetailsProto - Datanode Details
-   * @param nodeReport - node report
+   * @param heartbeat node heartbeat
    * @throws IOException
    */
 
   @Override
   public SCMHeartbeatResponseProto sendHeartbeat(
-      DatanodeDetailsProto datanodeDetailsProto,
-      SCMNodeReport nodeReport) throws IOException {
-    SCMHeartbeatRequestProto.Builder req = SCMHeartbeatRequestProto
-        .newBuilder();
-    req.setDatanodeDetails(datanodeDetailsProto);
-    req.setNodeReport(nodeReport);
+      SCMHeartbeatRequestProto heartbeat) throws IOException {
     final SCMHeartbeatResponseProto resp;
     try {
-      resp = rpcProxy.sendHeartbeat(NULL_RPC_CONTROLLER, req.build());
+      resp = rpcProxy.sendHeartbeat(NULL_RPC_CONTROLLER, heartbeat);
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -154,16 +147,16 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB
    * @return SCM Command.
    */
   @Override
-  public SCMRegisteredCmdResponseProto register(
-      DatanodeDetailsProto datanodeDetailsProto, SCMNodeReport nodeReport,
-      ContainerReportsRequestProto containerReportsRequestProto)
+  public SCMRegisteredResponseProto register(
+      DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport,
+      ContainerReportsProto containerReportsRequestProto)
       throws IOException {
     SCMRegisterRequestProto.Builder req =
         SCMRegisterRequestProto.newBuilder();
     req.setDatanodeDetails(datanodeDetailsProto);
     req.setContainerReport(containerReportsRequestProto);
     req.setNodeReport(nodeReport);
-    final SCMRegisteredCmdResponseProto response;
+    final SCMRegisteredResponseProto response;
     try {
       response = rpcProxy.register(NULL_RPC_CONTROLLER, req.build());
     } catch (ServiceException e) {
@@ -172,25 +165,6 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB
     return response;
   }
 
-  /**
-   * Send a container report.
-   *
-   * @param reports -- Container report
-   * @return HeartbeatRespose.nullcommand.
-   * @throws IOException
-   */
-  @Override
-  public ContainerReportsResponseProto sendContainerReport(
-      ContainerReportsRequestProto reports) throws IOException {
-    final ContainerReportsResponseProto resp;
-    try {
-      resp = rpcProxy.sendContainerReport(NULL_RPC_CONTROLLER, reports);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    return resp;
-  }
-
   @Override
   public ContainerBlocksDeletionACKResponseProto sendContainerBlocksDeletionACK(
       ContainerBlocksDeletionACKProto deletedBlocks) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
index 07dba57..7e8bd8a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
@@ -19,18 +19,22 @@ package org.apache.hadoop.ozone.protocolPB;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
+    .StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos
-    .ContainerBlocksDeletionACKResponseProto;
+    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos
+    .ContainerBlocksDeletionACKResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -55,9 +59,8 @@ public class StorageContainerDatanodeProtocolServerSideTranslatorPB
   }
 
   @Override
-  public StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto
-      getVersion(RpcController controller,
-      StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto request)
+  public SCMVersionResponseProto getVersion(RpcController controller,
+      SCMVersionRequestProto request)
       throws ServiceException {
     try {
       return impl.getVersion(request);
@@ -67,15 +70,13 @@ public class StorageContainerDatanodeProtocolServerSideTranslatorPB
   }
 
   @Override
-  public StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto
-      register(RpcController controller, StorageContainerDatanodeProtocolProtos
-      .SCMRegisterRequestProto request) throws ServiceException {
+  public SCMRegisteredResponseProto register(RpcController controller,
+      SCMRegisterRequestProto request) throws ServiceException {
     try {
-      ContainerReportsRequestProto containerRequestProto = null;
-      SCMNodeReport scmNodeReport = null;
-      containerRequestProto = request.getContainerReport();
-      scmNodeReport = request.getNodeReport();
-      return impl.register(request.getDatanodeDetails(), scmNodeReport,
+      ContainerReportsProto containerRequestProto = request
+          .getContainerReport();
+      NodeReportProto dnNodeReport = request.getNodeReport();
+      return impl.register(request.getDatanodeDetails(), dnNodeReport,
           containerRequestProto);
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -83,27 +84,15 @@ public class StorageContainerDatanodeProtocolServerSideTranslatorPB
   }
 
   @Override
-  public SCMHeartbeatResponseProto
-      sendHeartbeat(RpcController controller,
+  public SCMHeartbeatResponseProto sendHeartbeat(RpcController controller,
       SCMHeartbeatRequestProto request) throws ServiceException {
     try {
-      return impl.sendHeartbeat(request.getDatanodeDetails(),
-          request.getNodeReport());
+      return impl.sendHeartbeat(request);
     } catch (IOException e) {
       throw new ServiceException(e);
     }
   }
 
-  @Override
-  public ContainerReportsResponseProto sendContainerReport(
-      RpcController controller, ContainerReportsRequestProto request)
-      throws ServiceException {
-    try {
-      return impl.sendContainerReport(request);
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
 
   @Override
   public ContainerBlocksDeletionACKResponseProto sendContainerBlocksDeletionACK(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index 20e6af8..cc131e0 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -34,81 +34,74 @@ package hadoop.hdds;
 
 import "hdds.proto";
 
+/**
+ * Request for version info of the software stack on the server.
+ */
+message SCMVersionRequestProto {}
 
 /**
-* This message is send by data node to indicate that it is alive or it is
-* registering with the node manager.
+* Generic response that is sent to a version request. This allows keys to be
+* added on the fly and the protocol to remain stable.
 */
-message SCMHeartbeatRequestProto {
-  required DatanodeDetailsProto datanodeDetails = 1;
-  optional SCMNodeReport nodeReport = 2;
+message SCMVersionResponseProto {
+  required uint32 softwareVersion = 1;
+  repeated hadoop.hdds.KeyValue keys = 2;
 }
 
-/**
-A container report contains the following information.
-*/
-message ContainerInfo {
-  optional string finalhash = 2;
-  optional int64 size = 3;
-  optional int64 used = 4;
-  optional int64 keyCount = 5;
-  // TODO: move the io count to separate message
-  optional int64 readCount = 6;
-  optional int64 writeCount = 7;
-  optional int64 readBytes = 8;
-  optional int64 writeBytes = 9;
-  required int64 containerID = 10;
-  optional hadoop.hdds.LifeCycleState state = 11;
+message SCMRegisterRequestProto {
+  required DatanodeDetailsProto datanodeDetails = 1;
+  required NodeReportProto nodeReport = 2;
+  required ContainerReportsProto containerReport = 3;
 }
 
-// The deleted blocks which are stored in deletedBlock.db of scm.
-// We don't use BlockID because this only contians multiple localIDs
-// of the same containerID.
-message DeletedBlocksTransaction {
-  required int64 txID = 1;
-  required int64 containerID = 2;
-  repeated int64 localID = 3;
-  // the retry time of sending deleting command to datanode.
-  required int32 count = 4;
+/**
+ * Datanode ID returned by the SCM. This is similar to name node
+ * registration of a datanode.
+ */
+message SCMRegisteredResponseProto {
+  enum ErrorCode {
+    success = 1;
+    errorNodeNotPermitted = 2;
+  }
+  required ErrorCode errorCode = 1;
+  required string datanodeUUID = 2;
+  required string clusterID = 3;
+  optional SCMNodeAddressList addressList = 4;
+  optional string hostname = 5;
+  optional string ipAddress = 6;
 }
 
 /**
-A set of container reports, max count is generally set to
-8192 since that keeps the size of the reports under 1 MB.
+* This message is sent by a data node to indicate that it is alive or is
+* registering with the node manager.
 */
-message ContainerReportsRequestProto {
-  enum reportType {
-    fullReport = 0;
-    deltaReport = 1;
-  }
+message SCMHeartbeatRequestProto {
   required DatanodeDetailsProto datanodeDetails = 1;
-  repeated ContainerInfo reports = 2;
-  required reportType type = 3;
+  optional NodeReportProto nodeReport = 2;
+  optional ContainerReportsProto containerReport = 3;
 }
 
-message ContainerReportsResponseProto {
+/*
+ * A group of commands for the datanode to execute
+ */
+message SCMHeartbeatResponseProto {
+  required string datanodeUUID = 1;
+  repeated SCMCommandProto commands = 2;
 }
 
-/**
-* This message is send along with the heart beat to report datanode
-* storage utilization by SCM.
-*/
-message SCMNodeReport {
-  repeated SCMStorageReport storageReport = 1;
+message SCMNodeAddressList {
+  repeated string addressList = 1;
 }
 
 /**
- * Types of recognized storage media.
- */
-enum StorageTypeProto {
-  DISK = 1;
-  SSD = 2;
-  ARCHIVE = 3;
-  RAM_DISK = 4;
-  PROVIDED = 5;
+* This message is sent along with the heartbeat to report datanode
+* storage utilization to SCM.
+*/
+message NodeReportProto {
+  repeated StorageReportProto storageReport = 1;
 }
 
-message SCMStorageReport {
+message StorageReportProto {
   required string storageUuid = 1;
   required string storageLocation = 2;
   optional uint64 capacity = 3 [default = 0];
@@ -118,107 +111,82 @@ message SCMStorageReport {
   optional bool failed = 7 [default = false];
 }
 
-message SCMRegisterRequestProto {
-  required DatanodeDetailsProto datanodeDetails = 1;
-  required SCMNodeReport nodeReport = 2;
-  required ContainerReportsRequestProto containerReport = 3;
-}
-
-/**
- * Request for version info of the software stack on the server.
- */
-message SCMVersionRequestProto {
-
-}
-
-/**
-* Generic response that is send to a version request. This allows keys to be
-* added on the fly and protocol to remain stable.
-*/
-message SCMVersionResponseProto {
-  required uint32 softwareVersion = 1;
-  repeated hadoop.hdds.KeyValue keys = 2;
-}
-
-message SCMNodeAddressList {
-  repeated string addressList = 1;
-}
-
 /**
- * Datanode ID returned by the SCM. This is similar to name node
- * registeration of a datanode.
+ * Types of recognized storage media.
  */
-message SCMRegisteredCmdResponseProto {
-  enum ErrorCode {
-    success = 1;
-    errorNodeNotPermitted = 2;
-  }
-  required ErrorCode errorCode = 2;
-  required string datanodeUUID = 3;
-  required string clusterID = 4;
-  optional SCMNodeAddressList addressList = 5;
-  optional string hostname = 6;
-  optional string ipAddress = 7;
+enum StorageTypeProto {
+  DISK = 1;
+  SSD = 2;
+  ARCHIVE = 3;
+  RAM_DISK = 4;
+  PROVIDED = 5;
 }
 
 /**
- * SCM informs a datanode to register itself again.
- * With recieving this command, datanode will transit to REGISTER state.
- */
-message SCMReregisterCmdResponseProto {}
-
-/**
-This command tells the data node to send in the container report when possible
+A set of container reports; the max count is generally set to
+8192, since that keeps the size of the reports under 1 MB.
 */
-message SendContainerReportProto {
+message ContainerReportsProto {
+  repeated ContainerInfo reports = 2;
 }
 
-/**
-This command asks the datanode to close a specific container.
-*/
-message SCMCloseContainerCmdResponseProto {
-  required int64 containerID = 1;
-}
 
 /**
-Type of commands supported by SCM to datanode protocol.
+A container report contains the following information.
 */
-enum SCMCmdType {
-  versionCommand = 2;
-  registeredCommand = 3;
-  reregisterCommand = 4;
-  deleteBlocksCommand = 5;
-  closeContainerCommand = 6;
+message ContainerInfo {
+  optional string finalhash = 1;
+  optional int64 size = 2;
+  optional int64 used = 3;
+  optional int64 keyCount = 4;
+  // TODO: move the io count to separate message
+  optional int64 readCount = 5;
+  optional int64 writeCount = 6;
+  optional int64 readBytes = 7;
+  optional int64 writeBytes = 8;
+  required int64 containerID = 9;
+  optional hadoop.hdds.LifeCycleState state = 10;
 }
 
 /*
 * These are commands returned by SCM for the datanode to execute.
  */
-message SCMCommandResponseProto {
-  required SCMCmdType cmdType = 2; // Type of the command
-  optional SCMRegisteredCmdResponseProto registeredProto = 3;
-  optional SCMVersionResponseProto versionProto = 4;
-  optional SCMReregisterCmdResponseProto reregisterProto = 5;
-  optional SCMDeleteBlocksCmdResponseProto deleteBlocksProto = 6;
-  required string datanodeUUID = 7;
-  optional SCMCloseContainerCmdResponseProto closeContainerProto = 8;
+message SCMCommandProto {
+  enum Type {
+    reregisterCommand = 1;
+    deleteBlocksCommand = 2;
+    closeContainerCommand = 3;
+    deleteContainerCommand = 4;
+  }
+  // TODO: once we start using protoc 3.x, refactor this message using "oneof"
+  required Type commandType = 1;
+  optional ReregisterCommandProto reregisterCommandProto = 2;
+  optional DeleteBlocksCommandProto deleteBlocksCommandProto = 3;
+  optional CloseContainerCommandProto closeContainerCommandProto = 4;
+  optional DeleteContainerCommandProto deleteContainerCommandProto = 5;
 }
 
-
-/*
- * A group of commands for the datanode to execute
+/**
+ * SCM informs a datanode to register itself again.
+ * On receiving this command, the datanode will transition to the REGISTER state.
  */
-message SCMHeartbeatResponseProto {
-  repeated SCMCommandResponseProto commands = 1;
-}
+message ReregisterCommandProto {}
+
 
 // HB response from SCM, contains a list of block deletion transactions.
-message SCMDeleteBlocksCmdResponseProto {
+message DeleteBlocksCommandProto {
   repeated DeletedBlocksTransaction deletedBlocksTransactions = 1;
 }
 
-// SendACK response returned by datanode to SCM, currently empty.
-message ContainerBlocksDeletionACKResponseProto {
+// The deleted blocks which are stored in deletedBlock.db of scm.
+// We don't use BlockID because this only contians multiple localIDs
+// of the same containerID.
+message DeletedBlocksTransaction {
+  required int64 txID = 1;
+  required int64 containerID = 2;
+  repeated int64 localID = 3;
+  // the retry time of sending deleting command to datanode.
+  required int32 count = 4;
 }
 
 // ACK message datanode sent to SCM, contains the result of
@@ -231,6 +199,24 @@ message ContainerBlocksDeletionACKProto {
   repeated DeleteBlockTransactionResult results = 1;
 }
 
+// SendACK response returned by datanode to SCM, currently empty.
+message ContainerBlocksDeletionACKResponseProto {
+}
+
+/**
+This command asks the datanode to close a specific container.
+*/
+message CloseContainerCommandProto {
+  required int64 containerID = 1;
+}
+
+/**
+This command asks the datanode to delete a specific container.
+*/
+message DeleteContainerCommandProto {
+  required int64 containerID = 1;
+}
+
 /**
  * Protocol used from a datanode to StorageContainerManager.
  *
@@ -305,7 +291,7 @@ service StorageContainerDatanodeProtocolService {
   /**
   * Registers a data node with SCM.
   */
-  rpc register (SCMRegisterRequestProto) returns (SCMRegisteredCmdResponseProto);
+  rpc register (SCMRegisterRequestProto) returns (SCMRegisteredResponseProto);
 
   /**
    * Send heartbeat from datanode to SCM. HB's under SCM looks more
@@ -315,12 +301,6 @@ service StorageContainerDatanodeProtocolService {
   rpc sendHeartbeat (SCMHeartbeatRequestProto) returns (SCMHeartbeatResponseProto);
 
   /**
-    send container reports sends the container report to SCM. This will
-    return a null command as response.
-  */
-  rpc sendContainerReport(ContainerReportsRequestProto) returns (ContainerReportsResponseProto);
-
-  /**
    * Sends the block deletion ACK to SCM.
    */
   rpc sendContainerBlocksDeletionACK (ContainerBlocksDeletionACKProto) returns (ContainerBlocksDeletionACKResponseProto);
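
One caveat in the new schema: SCMCommandProto pairs commandType with four
optional payload fields, and, per the TODO above, nothing enforces that
exactly one payload is set until the message is refactored onto "oneof". A
hedged Java sketch of how SCM is expected to populate it (field values and
the datanodeUuid variable are illustrative):

  SCMCommandProto cmd = SCMCommandProto.newBuilder()
      .setCommandType(SCMCommandProto.Type.closeContainerCommand)
      // By convention, set only the payload that matches commandType.
      .setCloseContainerCommandProto(CloseContainerCommandProto.newBuilder()
          .setContainerID(42L)
          .build())
      .build();

  SCMHeartbeatResponseProto response = SCMHeartbeatResponseProto.newBuilder()
      .setDatanodeUUID(datanodeUuid)
      .addCommands(cmd)
      .build();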

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index c57a366..0ee6321 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -16,12 +16,12 @@
  */
 package org.apache.hadoop.ozone.container.common;
 
-import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.scm.VersionInfo;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
+    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -30,13 +30,13 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto;
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
 import org.apache.hadoop.ozone.protocol.VersionResponse;
 
@@ -56,7 +56,7 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
   // Map of datanode to containers
   private Map<DatanodeDetails, Map<String, ContainerInfo>> nodeContainers =
       new HashMap();
-  private Map<DatanodeDetails, SCMNodeReport> nodeReports = new HashMap<>();
+  private Map<DatanodeDetails, NodeReportProto> nodeReports = new HashMap<>();
   /**
    * Returns the number of heartbeats made to this class.
    *
@@ -166,20 +166,17 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
   /**
    * Used by data node to send a Heartbeat.
    *
-   * @param datanodeDetailsProto - DatanodeDetailsProto.
-   * @param nodeReport - node report.
+   * @param heartbeat - node heartbeat.
    * @return - SCMHeartbeatResponseProto
    * @throws IOException
    */
   @Override
   public StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto
-      sendHeartbeat(DatanodeDetailsProto datanodeDetailsProto,
-                    SCMNodeReport nodeReport)
-      throws IOException {
+      sendHeartbeat(SCMHeartbeatRequestProto heartbeat) throws IOException {
     rpcCount.incrementAndGet();
     heartbeatCount.incrementAndGet();
     sleepIfNeeded();
-    List<SCMCommandResponseProto>
+    List<SCMCommandProto>
         cmdResponses = new LinkedList<>();
     return SCMHeartbeatResponseProto.newBuilder().addAllCommands(cmdResponses)
         .build();
@@ -193,21 +190,19 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
    */
   @Override
   public StorageContainerDatanodeProtocolProtos
-      .SCMRegisteredCmdResponseProto register(
-          DatanodeDetailsProto datanodeDetailsProto, SCMNodeReport nodeReport,
-          StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto
+      .SCMRegisteredResponseProto register(
+          DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport,
+          StorageContainerDatanodeProtocolProtos.ContainerReportsProto
               containerReportsRequestProto)
       throws IOException {
     rpcCount.incrementAndGet();
-    sendContainerReport(containerReportsRequestProto);
     updateNodeReport(datanodeDetailsProto, nodeReport);
     sleepIfNeeded();
-    return StorageContainerDatanodeProtocolProtos
-        .SCMRegisteredCmdResponseProto
+    return StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto
         .newBuilder().setClusterID(UUID.randomUUID().toString())
         .setDatanodeUUID(datanodeDetailsProto.getUuid()).setErrorCode(
             StorageContainerDatanodeProtocolProtos
-                .SCMRegisteredCmdResponseProto.ErrorCode.success).build();
+                .SCMRegisteredResponseProto.ErrorCode.success).build();
   }
 
   /**
@@ -216,19 +211,19 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
    * @param nodeReport
    */
   public void updateNodeReport(DatanodeDetailsProto datanodeDetailsProto,
-      SCMNodeReport nodeReport) {
+      NodeReportProto nodeReport) {
     DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf(
         datanodeDetailsProto);
-    SCMNodeReport.Builder datanodeReport = SCMNodeReport.newBuilder();
+    NodeReportProto.Builder nodeReportProto = NodeReportProto.newBuilder();
 
-    List<SCMStorageReport> storageReports =
+    List<StorageReportProto> storageReports =
         nodeReport.getStorageReportList();
 
-    for(SCMStorageReport report : storageReports) {
-      datanodeReport.addStorageReport(report);
+    for(StorageReportProto report : storageReports) {
+      nodeReportProto.addStorageReport(report);
     }
 
-    nodeReports.put(datanode, datanodeReport.build());
+    nodeReports.put(datanode, nodeReportProto.build());
 
   }
 
@@ -254,39 +249,6 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
     return 0;
   }
 
-  /**
-   * Send a container report.
-   *
-   * @param reports -- Container report
-   * @return HeartbeatResponse.nullcommand.
-   * @throws IOException
-   */
-  @Override
-  public StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto
-      sendContainerReport(StorageContainerDatanodeProtocolProtos
-      .ContainerReportsRequestProto reports) throws IOException {
-    Preconditions.checkNotNull(reports);
-    containerReportsCount.incrementAndGet();
-
-    DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf(
-        reports.getDatanodeDetails());
-    if (reports.getReportsCount() > 0) {
-      Map containers = nodeContainers.get(datanode);
-      if (containers == null) {
-        containers = new LinkedHashMap();
-        nodeContainers.put(datanode, containers);
-      }
-
-      for (StorageContainerDatanodeProtocolProtos.ContainerInfo report:
-          reports.getReportsList()) {
-        containers.put(report.getContainerID(), report);
-      }
-    }
-
-    return StorageContainerDatanodeProtocolProtos
-        .ContainerReportsResponseProto.newBuilder().build();
-  }
-
   @Override
   public ContainerBlocksDeletionACKResponseProto sendContainerBlocksDeletionACK(
       ContainerBlocksDeletionACKProto request) throws IOException {


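With this change the standalone sendContainerReport RPC is gone: container
reports now travel inside the heartbeat request itself, which is why the mock
above only implements the consolidated sendHeartbeat and register calls. A
minimal sketch of the datanode-side call, with builder method names assumed
from the protobuf-generated Java classes (datanodeDetails and scm are taken
as given):

  ContainerReportsProto containerReport = ContainerReportsProto.newBuilder()
      .addReports(ContainerInfo.newBuilder()
          .setContainerID(42L)          // illustrative values only
          .setUsed(1024L)
          .build())
      .build();

  SCMHeartbeatRequestProto heartbeat = SCMHeartbeatRequestProto.newBuilder()
      .setDatanodeDetails(datanodeDetails)
      .setContainerReport(containerReport)
      .build();

  SCMHeartbeatResponseProto response = scm.sendHeartbeat(heartbeat);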


[17/50] [abbrv] hadoop git commit: HADOOP-15498. TestHadoopArchiveLogs (#testGenerateScript, #testPrepareWorkingDir) fails on Windows. Contributed by Anbang Hu.

Posted by xy...@apache.org.
HADOOP-15498. TestHadoopArchiveLogs (#testGenerateScript, #testPrepareWorkingDir) fails on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fdc993a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fdc993a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fdc993a

Branch: refs/heads/HDDS-4
Commit: 8fdc993a993728c65084d7dc3ac469059cb1f603
Parents: 9dbf4f0
Author: Inigo Goiri <in...@apache.org>
Authored: Mon May 28 16:45:42 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Mon May 28 16:45:42 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/tools/TestHadoopArchiveLogs.java  | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fdc993a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
index 2ddd4c5..a1b662c 100644
--- a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
+++ b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
@@ -278,7 +279,7 @@ public class TestHadoopArchiveLogs {
     hal.generateScript(localScript);
     Assert.assertTrue(localScript.exists());
     String script = IOUtils.toString(localScript.toURI());
-    String[] lines = script.split(System.lineSeparator());
+    String[] lines = script.split("\n");
     Assert.assertEquals(22, lines.length);
     Assert.assertEquals("#!/bin/bash", lines[0]);
     Assert.assertEquals("set -e", lines[1]);
@@ -368,7 +369,8 @@ public class TestHadoopArchiveLogs {
     Assert.assertTrue(dirPrepared);
     Assert.assertTrue(fs.exists(workingDir));
     Assert.assertEquals(
-        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true),
+        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL,
+            !Shell.WINDOWS),
         fs.getFileStatus(workingDir).getPermission());
     // Throw a file in the dir
     Path dummyFile = new Path(workingDir, "dummy.txt");
@@ -381,7 +383,8 @@ public class TestHadoopArchiveLogs {
     Assert.assertTrue(fs.exists(workingDir));
     Assert.assertTrue(fs.exists(dummyFile));
     Assert.assertEquals(
-        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true),
+        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL,
+            !Shell.WINDOWS),
         fs.getFileStatus(workingDir).getPermission());
     // -force is true and the dir exists, so it will recreate it and the dummy
     // won't exist anymore
@@ -390,7 +393,8 @@ public class TestHadoopArchiveLogs {
     Assert.assertTrue(dirPrepared);
     Assert.assertTrue(fs.exists(workingDir));
     Assert.assertEquals(
-        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true),
+        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL,
+            !Shell.WINDOWS),
         fs.getFileStatus(workingDir).getPermission());
     Assert.assertFalse(fs.exists(dummyFile));
   }

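Both failures were platform quirks. The generated script evidently uses Unix
"\n" line endings on every OS (its first line is "#!/bin/bash"), so splitting
on the platform separator only works where that separator is "\n"; and the
fourth FsPermission argument is the sticky bit, which the Windows local
filesystem does not support, hence !Shell.WINDOWS. A minimal illustration of
the separator pitfall:

  String script = "#!/bin/bash\nset -e\n";        // written with '\n' on every OS
  int unixSplit = script.split("\n").length;      // 2 on every platform
  int platformSplit = script.split(System.lineSeparator()).length;
                                                  // 2 on Unix, 1 on Windows ("\r\n")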



[40/50] [abbrv] hadoop git commit: HDDS-128. Support for DN to SCM signaling. Contributed by Nanda Kumar.

Posted by xy...@apache.org.
HDDS-128. Support for DN to SCM signaling. Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02c4b89f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02c4b89f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02c4b89f

Branch: refs/heads/HDDS-4
Commit: 02c4b89f99c93cf0a98509bc65a3313b653e98ff
Parents: 778a4a2
Author: Anu Engineer <ae...@apache.org>
Authored: Wed May 30 13:15:44 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed May 30 14:21:13 2018 -0700

----------------------------------------------------------------------
 .../StorageContainerDatanodeProtocol.proto      | 28 +++++++++++++++-----
 1 file changed, 21 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02c4b89f/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index cc131e0..ac2314e 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -79,6 +79,7 @@ message SCMHeartbeatRequestProto {
   required DatanodeDetailsProto datanodeDetails = 1;
   optional NodeReportProto nodeReport = 2;
   optional ContainerReportsProto containerReport = 3;
+  optional ContainerActionsProto containerActions = 4;
 }
 
 /*
@@ -122,20 +123,33 @@ enum StorageTypeProto {
   PROVIDED = 5;
 }
 
-/**
-A set of container reports, max count is generally set to
-8192 since that keeps the size of the reports under 1 MB.
-*/
 message ContainerReportsProto {
-  repeated ContainerInfo reports = 2;
+  repeated ContainerInfo reports = 1;
 }
 
+message ContainerActionsProto {
+  repeated ContainerAction containerActions = 1;
+}
+
+message ContainerAction {
+  enum Action {
+    CLOSE = 1;
+  }
+
+  enum Reason {
+    CONTAINER_FULL = 1;
+  }
+
+  required ContainerInfo container = 1;
+  required Action action = 2;
+  optional Reason reason = 3;
+}
 
 /**
 A container report contains the following information.
 */
 message ContainerInfo {
-  optional string finalhash = 1;
+  required int64 containerID = 1;
   optional int64 size = 2;
   optional int64 used = 3;
   optional int64 keyCount = 4;
@@ -144,7 +158,7 @@ message ContainerInfo {
   optional int64 writeCount = 6;
   optional int64 readBytes = 7;
   optional int64 writeBytes = 8;
-  required int64 containerID = 9;
+  optional string finalhash = 9;
   optional hadoop.hdds.LifeCycleState state = 10;
 }
 

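The new ContainerAction message gives the datanode an in-band way to signal
SCM, e.g. asking that a container which has filled up be closed. A sketch of
attaching such an action to a heartbeat (builder names assumed from protoc's
Java output; datanodeDetails and containerInfo are taken as given):

  ContainerAction close = ContainerAction.newBuilder()
      .setContainer(containerInfo)
      .setAction(ContainerAction.Action.CLOSE)
      .setReason(ContainerAction.Reason.CONTAINER_FULL)
      .build();

  SCMHeartbeatRequestProto heartbeat = SCMHeartbeatRequestProto.newBuilder()
      .setDatanodeDetails(datanodeDetails)
      .setContainerActions(ContainerActionsProto.newBuilder()
          .addContainerActions(close)
          .build())
      .build();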



[02/50] [abbrv] hadoop git commit: HADOOP-15473. Configure serialFilter in KeyProvider to avoid UnrecoverableKeyException caused by JDK-8189997. Contributed by Gabor Bota.

Posted by xy...@apache.org.
HADOOP-15473. Configure serialFilter in KeyProvider to avoid UnrecoverableKeyException caused by JDK-8189997. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02322de3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02322de3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02322de3

Branch: refs/heads/HDDS-4
Commit: 02322de3f95ba78a22c057037ef61aa3ab1d3824
Parents: 8d5509c
Author: Xiao Chen <xi...@apache.org>
Authored: Fri May 25 09:08:15 2018 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri May 25 09:10:51 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/crypto/key/KeyProvider.java   | 18 +++++++++++++++
 .../fs/CommonConfigurationKeysPublic.java       |  7 ++++++
 .../src/main/resources/core-default.xml         | 23 ++++++++++++++++++++
 3 files changed, 48 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02322de3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
index 5d670e5..050540b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
@@ -42,6 +42,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 
 import javax.crypto.KeyGenerator;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCEKS_KEY_SERIALFILTER;
+
 /**
  * A provider of secret key material for Hadoop applications. Provides an
  * abstraction to separate key storage from users of encryption. It
@@ -61,6 +63,14 @@ public abstract class KeyProvider {
       CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_DEFAULT_BITLENGTH_KEY;
   public static final int DEFAULT_BITLENGTH = CommonConfigurationKeysPublic.
       HADOOP_SECURITY_KEY_DEFAULT_BITLENGTH_DEFAULT;
+  public static final String JCEKS_KEY_SERIALFILTER_DEFAULT =
+      "java.lang.Enum;"
+          + "java.security.KeyRep;"
+          + "java.security.KeyRep$Type;"
+          + "javax.crypto.spec.SecretKeySpec;"
+          + "org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata;"
+          + "!*";
+  public static final String JCEKS_KEY_SERIAL_FILTER = "jceks.key.serialFilter";
 
   private final Configuration conf;
 
@@ -394,6 +404,14 @@ public abstract class KeyProvider {
    */
   public KeyProvider(Configuration conf) {
     this.conf = new Configuration(conf);
+    // Added for HADOOP-15473. Configured serialFilter property fixes
+    // java.security.UnrecoverableKeyException in JDK 8u171.
+    if(System.getProperty(JCEKS_KEY_SERIAL_FILTER) == null) {
+      String serialFilter =
+          conf.get(HADOOP_SECURITY_CRYPTO_JCEKS_KEY_SERIALFILTER,
+              JCEKS_KEY_SERIALFILTER_DEFAULT);
+      System.setProperty(JCEKS_KEY_SERIAL_FILTER, serialFilter);
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02322de3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 8837cfb..9e0ba20 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -662,6 +662,13 @@ public class CommonConfigurationKeysPublic {
    * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
    * core-default.xml</a>
    */
+  public static final String HADOOP_SECURITY_CRYPTO_JCEKS_KEY_SERIALFILTER =
+      "hadoop.security.crypto.jceks.key.serialfilter";
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY = 
     "hadoop.security.crypto.buffer.size";
   /** Defalt value for HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02322de3/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index fad2985..9564587 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2487,6 +2487,29 @@
 </property>
 
 <property>
+  <name>hadoop.security.crypto.jceks.key.serialfilter</name>
+  <description>
+    Enhanced KeyStore Mechanisms in JDK 8u171 introduced jceks.key.serialFilter.
+    If jceks.key.serialFilter is configured, the JCEKS KeyStore uses it during
+    the deserialization of the encrypted Key object stored inside a
+    SecretKeyEntry.
+    If jceks.key.serialFilter is not configured it will cause an error when
+    recovering keystore file in KeyProviderFactory when recovering key from
+    keystore file using JDK 8u171 or newer. The filter pattern uses the same
+    format as jdk.serialFilter.
+
+    The value of this property will be used as the following:
+    1. The value of jceks.key.serialFilter system property takes precedence
+    over the value of this property.
+    2. In the absence of jceks.key.serialFilter system property the value of
+    this property will be set as the value of jceks.key.serialFilter.
+    3. If the value of this property and jceks.key.serialFilter system
+    property has not been set, org.apache.hadoop.crypto.key.KeyProvider
+    sets a default value for jceks.key.serialFilter.
+  </description>
+</property>
+
+<property>
   <name>hadoop.security.crypto.buffer.size</name>
   <value>8192</value>
   <description>

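Operationally the precedence is: an explicit -Djceks.key.serialFilter on the
JVM command line always wins; otherwise the first KeyProvider constructed
promotes this Hadoop property (or the built-in default) to the system
property. A sketch of tightening the filter from client code, using the new
constant introduced above:

  Configuration conf = new Configuration();
  conf.set(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCEKS_KEY_SERIALFILTER,
      "java.lang.Enum;java.security.KeyRep;java.security.KeyRep$Type;"
          + "javax.crypto.spec.SecretKeySpec;"
          + "org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata;!*");
  // Any KeyProvider built from this conf will set jceks.key.serialFilter,
  // unless the system property was already supplied with -D.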



[38/50] [abbrv] hadoop git commit: YARN-8368. yarn app start cli should print applicationId. Contributed by Rohith Sharma K S

Posted by xy...@apache.org.
YARN-8368. yarn app start cli should print applicationId. Contributed by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96eefcc8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96eefcc8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96eefcc8

Branch: refs/heads/HDDS-4
Commit: 96eefcc84aacc4cc82ad7e3e72c5bdad56f4a7b7
Parents: 47c31ff
Author: Billie Rinaldi <bi...@apache.org>
Authored: Wed May 30 12:37:01 2018 -0700
Committer: Billie Rinaldi <bi...@apache.org>
Committed: Wed May 30 12:37:43 2018 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/service/webapp/ApiServer.java   | 28 +++++++++++---------
 .../hadoop/yarn/service/ServiceClientTest.java  | 18 ++++++++++++-
 .../yarn/service/client/ServiceClient.java      |  2 ++
 3 files changed, 35 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96eefcc8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index 46c9abe..578273c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -641,20 +641,24 @@ public class ApiServer {
   private Response startService(String appName,
       final UserGroupInformation ugi) throws IOException,
       InterruptedException {
-    ugi.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws YarnException, IOException {
-        ServiceClient sc = getServiceClient();
-        sc.init(YARN_CONFIG);
-        sc.start();
-        sc.actionStart(appName);
-        sc.close();
-        return null;
-      }
-    });
+    ApplicationId appId =
+        ugi.doAs(new PrivilegedExceptionAction<ApplicationId>() {
+          @Override public ApplicationId run()
+              throws YarnException, IOException {
+            ServiceClient sc = getServiceClient();
+            sc.init(YARN_CONFIG);
+            sc.start();
+            sc.actionStart(appName);
+            ApplicationId appId = sc.getAppId(appName);
+            sc.close();
+            return appId;
+          }
+        });
     LOG.info("Successfully started service " + appName);
     ServiceStatus status = new ServiceStatus();
-    status.setDiagnostics("Service " + appName + " is successfully started.");
+    status.setDiagnostics(
+        "Service " + appName + " is successfully started with ApplicationId: "
+            + appId);
     status.setState(ServiceState.ACCEPTED);
     return formatResponse(Status.OK, status);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96eefcc8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
index 75b9486..81be750 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
@@ -34,8 +34,10 @@ import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
 
@@ -50,6 +52,8 @@ public class ServiceClientTest extends ServiceClient {
   private Service goodServiceStatus = buildLiveGoodService();
   private boolean initialized;
   private Set<String> expectedInstances = new HashSet<>();
+  private Map<String, ApplicationId> serviceAppId = new HashMap<>();
+
 
   public ServiceClientTest() {
     super();
@@ -83,7 +87,10 @@ public class ServiceClientTest extends ServiceClient {
   public ApplicationId actionCreate(Service service) throws IOException {
     ServiceApiUtil.validateAndResolveService(service,
         new SliderFileSystem(conf), getConfig());
-    return ApplicationId.newInstance(System.currentTimeMillis(), 1);
+    ApplicationId appId =
+        ApplicationId.newInstance(System.currentTimeMillis(), 1);
+    serviceAppId.put(service.getName(), appId);
+    return appId;
   }
 
   @Override
@@ -99,6 +106,9 @@ public class ServiceClientTest extends ServiceClient {
   public int actionStart(String serviceName)
       throws YarnException, IOException {
     if (serviceName != null && serviceName.equals("jenkins")) {
+      ApplicationId appId =
+          ApplicationId.newInstance(System.currentTimeMillis(), 1);
+      serviceAppId.put(serviceName, appId);
       return EXIT_SUCCESS;
     } else {
       throw new ApplicationNotFoundException("");
@@ -207,4 +217,10 @@ public class ServiceClientTest extends ServiceClient {
     comp.setContainers(containers);
     return service;
   }
+
+  @Override
+  public synchronized ApplicationId getAppId(String serviceName)
+      throws IOException, YarnException {
+    return serviceAppId.get(serviceName);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96eefcc8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 0ab3322..e86ecbc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -978,6 +978,8 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
       // see if it is actually running and bail out;
       verifyNoLiveAppInRM(serviceName, "start");
       ApplicationId appId = submitApp(service);
+      cachedAppInfo.put(serviceName, new AppInfo(appId, service
+          .getKerberosPrincipal().getPrincipalName()));
       service.setId(appId.toString());
       // write app definition on to hdfs
       Path appJson = ServiceApiUtil.writeAppDefinition(fs, appDir, service);

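The ApiServer refactor above switches to the value-returning form of
UserGroupInformation.doAs so the ApplicationId can escape the privileged
block. The same flow condensed to a lambda, for reference (a sketch only; the
try/finally around close() is an embellishment, not part of the patch):

  ApplicationId appId = ugi.doAs(
      (PrivilegedExceptionAction<ApplicationId>) () -> {
        ServiceClient sc = getServiceClient();
        sc.init(YARN_CONFIG);
        sc.start();
        try {
          sc.actionStart(appName);
          return sc.getAppId(appName);
        } finally {
          sc.close();
        }
      });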



[39/50] [abbrv] hadoop git commit: YARN-8350. NPE in service AM related to placement policy. Contributed by Gour Saha

Posted by xy...@apache.org.
YARN-8350. NPE in service AM related to placement policy. Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/778a4a24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/778a4a24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/778a4a24

Branch: refs/heads/HDDS-4
Commit: 778a4a24be176382a5704f709c00bdfcfe6ddc8c
Parents: 96eefcc
Author: Billie Rinaldi <bi...@apache.org>
Authored: Wed May 30 13:19:13 2018 -0700
Committer: Billie Rinaldi <bi...@apache.org>
Committed: Wed May 30 13:19:13 2018 -0700

----------------------------------------------------------------------
 .../yarn/service/component/Component.java       | 114 ++++++++++---------
 .../exceptions/RestApiErrorMessages.java        |   8 ++
 .../yarn/service/utils/ServiceApiUtil.java      |  24 +++-
 .../hadoop/yarn/service/TestServiceApiUtil.java |  44 ++++++-
 4 files changed, 130 insertions(+), 60 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/778a4a24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index 931877e..a1ee796 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -694,62 +694,66 @@ public class Component implements EventHandler<ComponentEvent> {
       // composite constraints then this AND-ed composite constraint is not
       // used.
       PlacementConstraint finalConstraint = null;
-      for (org.apache.hadoop.yarn.service.api.records.PlacementConstraint
-          yarnServiceConstraint : placementPolicy.getConstraints()) {
-        List<TargetExpression> targetExpressions = new ArrayList<>();
-        // Currently only intra-application allocation tags are supported.
-        if (!yarnServiceConstraint.getTargetTags().isEmpty()) {
-          targetExpressions.add(PlacementTargets.allocationTag(
-              yarnServiceConstraint.getTargetTags().toArray(new String[0])));
-        }
-        // Add all node attributes
-        for (Map.Entry<String, List<String>> attribute : yarnServiceConstraint
-            .getNodeAttributes().entrySet()) {
-          targetExpressions.add(PlacementTargets.nodeAttribute(
-              attribute.getKey(), attribute.getValue().toArray(new String[0])));
-        }
-        // Add all node partitions
-        if (!yarnServiceConstraint.getNodePartitions().isEmpty()) {
-          targetExpressions
-              .add(PlacementTargets.nodePartition(yarnServiceConstraint
-                  .getNodePartitions().toArray(new String[0])));
-        }
-        PlacementConstraint constraint = null;
-        switch (yarnServiceConstraint.getType()) {
-        case AFFINITY:
-          constraint = PlacementConstraints
-              .targetIn(yarnServiceConstraint.getScope().getValue(),
-                  targetExpressions.toArray(new TargetExpression[0]))
-              .build();
-          break;
-        case ANTI_AFFINITY:
-          constraint = PlacementConstraints
-              .targetNotIn(yarnServiceConstraint.getScope().getValue(),
-                  targetExpressions.toArray(new TargetExpression[0]))
-              .build();
-          break;
-        case AFFINITY_WITH_CARDINALITY:
-          constraint = PlacementConstraints.targetCardinality(
-              yarnServiceConstraint.getScope().name().toLowerCase(),
-              yarnServiceConstraint.getMinCardinality() == null ? 0
-                  : yarnServiceConstraint.getMinCardinality().intValue(),
-              yarnServiceConstraint.getMaxCardinality() == null
-                  ? Integer.MAX_VALUE
-                  : yarnServiceConstraint.getMaxCardinality().intValue(),
-              targetExpressions.toArray(new TargetExpression[0])).build();
-          break;
-        }
-        // The default AND-ed final composite constraint
-        if (finalConstraint != null) {
-          finalConstraint = PlacementConstraints
-              .and(constraint.getConstraintExpr(),
-                  finalConstraint.getConstraintExpr())
-              .build();
-        } else {
-          finalConstraint = constraint;
+      if (placementPolicy != null) {
+        for (org.apache.hadoop.yarn.service.api.records.PlacementConstraint
+            yarnServiceConstraint : placementPolicy.getConstraints()) {
+          List<TargetExpression> targetExpressions = new ArrayList<>();
+          // Currently only intra-application allocation tags are supported.
+          if (!yarnServiceConstraint.getTargetTags().isEmpty()) {
+            targetExpressions.add(PlacementTargets.allocationTag(
+                yarnServiceConstraint.getTargetTags().toArray(new String[0])));
+          }
+          // Add all node attributes
+          for (Map.Entry<String, List<String>> attribute : yarnServiceConstraint
+              .getNodeAttributes().entrySet()) {
+            targetExpressions
+                .add(PlacementTargets.nodeAttribute(attribute.getKey(),
+                    attribute.getValue().toArray(new String[0])));
+          }
+          // Add all node partitions
+          if (!yarnServiceConstraint.getNodePartitions().isEmpty()) {
+            targetExpressions
+                .add(PlacementTargets.nodePartition(yarnServiceConstraint
+                    .getNodePartitions().toArray(new String[0])));
+          }
+          PlacementConstraint constraint = null;
+          switch (yarnServiceConstraint.getType()) {
+          case AFFINITY:
+            constraint = PlacementConstraints
+                .targetIn(yarnServiceConstraint.getScope().getValue(),
+                    targetExpressions.toArray(new TargetExpression[0]))
+                .build();
+            break;
+          case ANTI_AFFINITY:
+            constraint = PlacementConstraints
+                .targetNotIn(yarnServiceConstraint.getScope().getValue(),
+                    targetExpressions.toArray(new TargetExpression[0]))
+                .build();
+            break;
+          case AFFINITY_WITH_CARDINALITY:
+            constraint = PlacementConstraints.targetCardinality(
+                yarnServiceConstraint.getScope().name().toLowerCase(),
+                yarnServiceConstraint.getMinCardinality() == null ? 0
+                    : yarnServiceConstraint.getMinCardinality().intValue(),
+                yarnServiceConstraint.getMaxCardinality() == null
+                    ? Integer.MAX_VALUE
+                    : yarnServiceConstraint.getMaxCardinality().intValue(),
+                targetExpressions.toArray(new TargetExpression[0])).build();
+            break;
+          }
+          // The default AND-ed final composite constraint
+          if (finalConstraint != null) {
+            finalConstraint = PlacementConstraints
+                .and(constraint.getConstraintExpr(),
+                    finalConstraint.getConstraintExpr())
+                .build();
+          } else {
+            finalConstraint = constraint;
+          }
+          LOG.debug("[COMPONENT {}] Placement constraint: {}",
+              componentSpec.getName(),
+              constraint.getConstraintExpr().toString());
         }
-        LOG.debug("[COMPONENT {}] Placement constraint: {}",
-            componentSpec.getName(), constraint.getConstraintExpr().toString());
       }
       ResourceSizing resourceSizing = ResourceSizing.newInstance((int) count,
           resource);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/778a4a24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
index 5b6eac3..1d2d719 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
@@ -91,6 +91,14 @@ public interface RestApiErrorMessages {
 
   String ERROR_QUICKLINKS_FOR_COMP_INVALID = "Quicklinks specified at"
       + " component level, needs corresponding values set at service level";
+  // Note: %sin is not a typo. Constraint name is optional so the error messages
+  // below handle that scenario by adding a space if name is specified.
+  String ERROR_PLACEMENT_POLICY_CONSTRAINT_TYPE_NULL = "Type not specified "
+      + "for constraint %sin placement policy of component %s.";
+  String ERROR_PLACEMENT_POLICY_CONSTRAINT_SCOPE_NULL = "Scope not specified "
+      + "for constraint %sin placement policy of component %s.";
+  String ERROR_PLACEMENT_POLICY_CONSTRAINT_TAGS_NULL = "Tag(s) not specified "
+      + "for constraint %sin placement policy of component %s.";
   String ERROR_PLACEMENT_POLICY_TAG_NAME_NOT_SAME = "Invalid target tag %s "
       + "specified in placement policy of component %s. For now, target tags "
       + "support self reference only. Specifying anything other than its "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/778a4a24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
index 2f826fa..6101bf0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.api.records.Configuration;
 import org.apache.hadoop.yarn.service.api.records.KerberosPrincipal;
 import org.apache.hadoop.yarn.service.api.records.PlacementConstraint;
+import org.apache.hadoop.yarn.service.api.records.PlacementPolicy;
 import org.apache.hadoop.yarn.service.api.records.Resource;
 import org.apache.hadoop.yarn.service.exceptions.SliderException;
 import org.apache.hadoop.yarn.service.conf.RestApiConstants;
@@ -314,9 +315,28 @@ public class ServiceApiUtil {
   private static void validatePlacementPolicy(List<Component> components,
       Set<String> componentNames) {
     for (Component comp : components) {
-      if (comp.getPlacementPolicy() != null) {
-        for (PlacementConstraint constraint : comp.getPlacementPolicy()
+      PlacementPolicy placementPolicy = comp.getPlacementPolicy();
+      if (placementPolicy != null) {
+        for (PlacementConstraint constraint : placementPolicy
             .getConstraints()) {
+          if (constraint.getType() == null) {
+            throw new IllegalArgumentException(String.format(
+              RestApiErrorMessages.ERROR_PLACEMENT_POLICY_CONSTRAINT_TYPE_NULL,
+              constraint.getName() == null ? "" : constraint.getName() + " ",
+              comp.getName()));
+          }
+          if (constraint.getScope() == null) {
+            throw new IllegalArgumentException(String.format(
+              RestApiErrorMessages.ERROR_PLACEMENT_POLICY_CONSTRAINT_SCOPE_NULL,
+              constraint.getName() == null ? "" : constraint.getName() + " ",
+              comp.getName()));
+          }
+          if (constraint.getTargetTags().isEmpty()) {
+            throw new IllegalArgumentException(String.format(
+              RestApiErrorMessages.ERROR_PLACEMENT_POLICY_CONSTRAINT_TAGS_NULL,
+              constraint.getName() == null ? "" : constraint.getName() + " ",
+              comp.getName()));
+          }
           for (String targetTag : constraint.getTargetTags()) {
             if (!comp.getName().equals(targetTag)) {
               throw new IllegalArgumentException(String.format(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/778a4a24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
index b209bbb..243c6b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.api.records.KerberosPrincipal;
 import org.apache.hadoop.yarn.service.api.records.PlacementConstraint;
 import org.apache.hadoop.yarn.service.api.records.PlacementPolicy;
+import org.apache.hadoop.yarn.service.api.records.PlacementScope;
+import org.apache.hadoop.yarn.service.api.records.PlacementType;
 import org.apache.hadoop.yarn.service.api.records.Resource;
 import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages;
@@ -503,13 +505,48 @@ public class TestServiceApiUtil {
     PlacementPolicy pp = new PlacementPolicy();
     PlacementConstraint pc = new PlacementConstraint();
     pc.setName("CA1");
-    pc.setTargetTags(Collections.singletonList("comp-invalid"));
     pp.setConstraints(Collections.singletonList(pc));
     comp.setPlacementPolicy(pp);
 
     try {
       ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
-      Assert.fail(EXCEPTION_PREFIX + "service with empty placement");
+      Assert.fail(EXCEPTION_PREFIX + "constraint with no type");
+    } catch (IllegalArgumentException e) {
+      assertEquals(String.format(
+          RestApiErrorMessages.ERROR_PLACEMENT_POLICY_CONSTRAINT_TYPE_NULL,
+          "CA1 ", "comp-a"), e.getMessage());
+    }
+
+    // Set the type
+    pc.setType(PlacementType.ANTI_AFFINITY);
+
+    try {
+      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
+      Assert.fail(EXCEPTION_PREFIX + "constraint with no scope");
+    } catch (IllegalArgumentException e) {
+      assertEquals(String.format(
+          RestApiErrorMessages.ERROR_PLACEMENT_POLICY_CONSTRAINT_SCOPE_NULL,
+          "CA1 ", "comp-a"), e.getMessage());
+    }
+
+    // Set the scope
+    pc.setScope(PlacementScope.NODE);
+
+    try {
+      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
+      Assert.fail(EXCEPTION_PREFIX + "constraint with no tag(s)");
+    } catch (IllegalArgumentException e) {
+      assertEquals(String.format(
+          RestApiErrorMessages.ERROR_PLACEMENT_POLICY_CONSTRAINT_TAGS_NULL,
+          "CA1 ", "comp-a"), e.getMessage());
+    }
+
+    // Set a target tag - but an invalid one
+    pc.setTargetTags(Collections.singletonList("comp-invalid"));
+
+    try {
+      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
+      Assert.fail(EXCEPTION_PREFIX + "constraint with invalid tag name");
     } catch (IllegalArgumentException e) {
       assertEquals(
           String.format(
@@ -518,9 +555,10 @@ public class TestServiceApiUtil {
           e.getMessage());
     }
 
+    // Set valid target tags now
     pc.setTargetTags(Collections.singletonList("comp-a"));
 
-    // now it should succeed
+    // Finally it should succeed
     try {
       ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
     } catch (IllegalArgumentException e) {

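Besides the null guard around placementPolicy in Component, the validator now
fails fast on partially specified constraints. The three new checks share one
shape; a distilled helper (hypothetical, not in the patch) shows the pattern,
with the tag check differing only in testing isEmpty() rather than null:

  private static void requireConstraintField(Object field, String errorFormat,
      PlacementConstraint constraint, Component comp) {
    if (field == null) {
      throw new IllegalArgumentException(String.format(errorFormat,
          constraint.getName() == null ? "" : constraint.getName() + " ",
          comp.getName()));
    }
  }

  // e.g. requireConstraintField(constraint.getType(),
  //     RestApiErrorMessages.ERROR_PLACEMENT_POLICY_CONSTRAINT_TYPE_NULL,
  //     constraint, comp);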



[14/50] [abbrv] hadoop git commit: YARN-4781. Support intra-queue preemption for fairness ordering policy. Contributed by Eric Payne.

Posted by xy...@apache.org.
YARN-4781. Support intra-queue preemption for fairness ordering policy. Contributed by Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c343669
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c343669
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c343669

Branch: refs/heads/HDDS-4
Commit: 7c343669baf660df3b70d58987d6e68aec54d6fa
Parents: 61df174
Author: Sunil G <su...@apache.org>
Authored: Mon May 28 16:32:53 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Mon May 28 16:32:53 2018 +0530

----------------------------------------------------------------------
 .../FifoIntraQueuePreemptionPlugin.java         |  37 ++-
 .../capacity/IntraQueueCandidatesSelector.java  |  40 +++
 .../monitor/capacity/TempAppPerPartition.java   |   9 +
 .../AbstractComparatorOrderingPolicy.java       |   2 -
 ...alCapacityPreemptionPolicyMockFramework.java |  12 +-
 ...yPreemptionPolicyIntraQueueFairOrdering.java | 276 +++++++++++++++++++
 6 files changed, 366 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c343669/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
index 40f333f..12c178c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
@@ -34,6 +34,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.IntraQueueCandidatesSelector.TAFairOrderingComparator;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.IntraQueueCandidatesSelector.TAPriorityComparator;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.IntraQueuePreemptionOrderPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
@@ -41,6 +42,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
@@ -263,8 +266,17 @@ public class FifoIntraQueuePreemptionPlugin
       Resource queueReassignableResource,
       PriorityQueue<TempAppPerPartition> orderedByPriority) {
 
-    Comparator<TempAppPerPartition> reverseComp = Collections
-        .reverseOrder(new TAPriorityComparator());
+    Comparator<TempAppPerPartition> reverseComp;
+    OrderingPolicy<FiCaSchedulerApp> queueOrderingPolicy =
+        tq.leafQueue.getOrderingPolicy();
+    if (queueOrderingPolicy instanceof FairOrderingPolicy
+        && (context.getIntraQueuePreemptionOrderPolicy()
+            == IntraQueuePreemptionOrderPolicy.USERLIMIT_FIRST)) {
+      reverseComp = Collections.reverseOrder(
+          new TAFairOrderingComparator(this.rc, clusterResource));
+    } else {
+      reverseComp = Collections.reverseOrder(new TAPriorityComparator());
+    }
     TreeSet<TempAppPerPartition> orderedApps = new TreeSet<>(reverseComp);
 
     String partition = tq.partition;
@@ -355,7 +367,16 @@ public class FifoIntraQueuePreemptionPlugin
       TempQueuePerPartition tq, Collection<FiCaSchedulerApp> apps,
       Resource clusterResource,
       Map<String, Resource> perUserAMUsed) {
-    TAPriorityComparator taComparator = new TAPriorityComparator();
+    Comparator<TempAppPerPartition> taComparator;
+    OrderingPolicy<FiCaSchedulerApp> orderingPolicy =
+        tq.leafQueue.getOrderingPolicy();
+    if (orderingPolicy instanceof FairOrderingPolicy
+        && (context.getIntraQueuePreemptionOrderPolicy()
+            == IntraQueuePreemptionOrderPolicy.USERLIMIT_FIRST)) {
+      taComparator = new TAFairOrderingComparator(this.rc, clusterResource);
+    } else {
+       taComparator = new TAPriorityComparator();
+    }
     PriorityQueue<TempAppPerPartition> orderedByPriority = new PriorityQueue<>(
         100, taComparator);
 
@@ -393,13 +414,12 @@ public class FifoIntraQueuePreemptionPlugin
       // Set ideal allocation of app as 0.
       tmpApp.idealAssigned = Resources.createResource(0, 0);
 
-      orderedByPriority.add(tmpApp);
-
       // Create a TempUserPerPartition structure to hold more information
       // regarding each user's entities such as UserLimit etc. This could
       // be kept in a user to TempUserPerPartition map for further reference.
       String userName = app.getUser();
-      if (!usersPerPartition.containsKey(userName)) {
+      TempUserPerPartition tmpUser = usersPerPartition.get(userName);
+      if (tmpUser == null) {
         ResourceUsage userResourceUsage = tq.leafQueue.getUser(userName)
             .getResourceUsage();
 
@@ -409,7 +429,7 @@ public class FifoIntraQueuePreemptionPlugin
         amUsed = (userSpecificAmUsed == null)
             ? Resources.none() : userSpecificAmUsed;
 
-        TempUserPerPartition tmpUser = new TempUserPerPartition(
+        tmpUser = new TempUserPerPartition(
             tq.leafQueue.getUser(userName), tq.queueName,
             Resources.clone(userResourceUsage.getUsed(partition)),
             Resources.clone(amUsed),
@@ -432,7 +452,10 @@ public class FifoIntraQueuePreemptionPlugin
         tmpUser.idealAssigned = Resources.createResource(0, 0);
         tq.addUserPerPartition(userName, tmpUser);
       }
+      tmpApp.setTempUserPerPartition(tmpUser);
+      orderedByPriority.add(tmpApp);
     }
+
     return orderedByPriority;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c343669/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
index a91fac7..8ab9507 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsMana
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.AbstractComparatorOrderingPolicy;
+import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 import java.io.Serializable;
@@ -64,6 +66,44 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
     }
   }
 
+  /*
+   * Order first by amount used from least to most. Then order from oldest to
+   * youngest if amount used is the same.
+   */
+  static class TAFairOrderingComparator
+      implements Comparator<TempAppPerPartition> {
+
+    private ResourceCalculator rc;
+    private Resource clusterRes;
+
+    TAFairOrderingComparator(ResourceCalculator rc, Resource clusterRes) {
+      this.rc = rc;
+      this.clusterRes = clusterRes;
+    }
+
+    @Override
+    public int compare(TempAppPerPartition ta1, TempAppPerPartition ta2) {
+      if (ta1.getUser().equals(ta2.getUser())) {
+        AbstractComparatorOrderingPolicy<FiCaSchedulerApp> acop =
+            (AbstractComparatorOrderingPolicy<FiCaSchedulerApp>)
+            ta1.getFiCaSchedulerApp().getCSLeafQueue().getOrderingPolicy();
+        return acop.getComparator()
+                  .compare(ta1.getFiCaSchedulerApp(), ta2.getFiCaSchedulerApp());
+      } else {
+        Resource usedByUser1 = ta1.getTempUserPerPartition().getUsedDeductAM();
+        Resource usedByUser2 = ta2.getTempUserPerPartition().getUsedDeductAM();
+        if (Resources.equals(usedByUser1, usedByUser2)) {
+          return ta1.getApplicationId().compareTo(ta2.getApplicationId());
+        }
+        if (Resources.lessThan(rc, clusterRes, usedByUser1, usedByUser2)) {
+          return -1;
+        } else {
+          return 1;
+        }
+      }
+    }
+  }
+
   IntraQueuePreemptionComputePlugin fifoPreemptionComputePlugin = null;
   final CapacitySchedulerPreemptionContext context;
 

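TAFairOrderingComparator orders candidates by each user's non-AM usage (least
first), breaking ties across users FIFO by ApplicationId and, within a single
user, deferring to the leaf queue's own ordering-policy comparator. The
plugin wraps it in Collections.reverseOrder so the most-over-served users are
visited first when picking preemption candidates:

  Comparator<TempAppPerPartition> reverseComp =
      Collections.reverseOrder(new TAFairOrderingComparator(rc, clusterResource));
  TreeSet<TempAppPerPartition> orderedApps = new TreeSet<>(reverseComp);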
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c343669/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempAppPerPartition.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempAppPerPartition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempAppPerPartition.java
index e9a934b..05d8096 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempAppPerPartition.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempAppPerPartition.java
@@ -34,6 +34,7 @@ public class TempAppPerPartition extends AbstractPreemptionEntity {
   // Following fields are settled and used by candidate selection policies
   private final int priority;
   private final ApplicationId applicationId;
+  private TempUserPerPartition tempUser;
 
   FiCaSchedulerApp app;
 
@@ -102,4 +103,12 @@ public class TempAppPerPartition extends AbstractPreemptionEntity {
       Resources.subtractFrom(getActuallyToBePreempted(), toBeDeduct);
     }
   }
+
+  public void setTempUserPerPartition(TempUserPerPartition tu) {
+    tempUser = tu;
+  }
+
+  public TempUserPerPartition getTempUserPerPartition() {
+    return tempUser;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c343669/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java
index b7cb1bf..09dd3bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java
@@ -26,7 +26,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.*;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
-import com.google.common.annotations.VisibleForTesting;
 
 
 /**
@@ -89,7 +88,6 @@ public abstract class AbstractComparatorOrderingPolicy<S extends SchedulableEnti
     }
   }
 
-  @VisibleForTesting
   public Comparator<SchedulableEntity> getComparator() {
     return comparator; 
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c343669/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
index a972584..64b56fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preempti
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerPreemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
@@ -64,6 +65,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.mockito.ArgumentMatcher;
+import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -337,9 +339,11 @@ public class ProportionalCapacityPreemptionPolicyMockFramework {
           .thenReturn(pendingForDefaultPartition);
 
       // need to set pending resource in resource usage as well
-      ResourceUsage ru = new ResourceUsage();
+      ResourceUsage ru = Mockito.spy(new ResourceUsage());
       ru.setUsed(label, used);
+      when(ru.getCachedUsed(anyString())).thenReturn(used);
       when(app.getAppAttemptResourceUsage()).thenReturn(ru);
+      when(app.getSchedulingResourceUsage()).thenReturn(ru);
 
       start = end + 1;
     }
@@ -637,6 +641,12 @@ public class ProportionalCapacityPreemptionPolicyMockFramework {
         when(leafQueue.getApplications()).thenReturn(apps);
         when(leafQueue.getAllApplications()).thenReturn(apps);
         OrderingPolicy<FiCaSchedulerApp> so = mock(OrderingPolicy.class);
+        String opName = conf.get(CapacitySchedulerConfiguration.PREFIX
+            + CapacitySchedulerConfiguration.ROOT + "." + getQueueName(q)
+            + ".ordering-policy", "fifo");
+        if (opName.equals("fair")) {
+          so = Mockito.spy(new FairOrderingPolicy<FiCaSchedulerApp>());
+        }
         when(so.getPreemptionIterator()).thenAnswer(new Answer() {
           public Object answer(InvocationOnMock invocation) {
             return apps.descendingIterator();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c343669/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering.java
new file mode 100644
index 0000000..1678651
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering.java
@@ -0,0 +1,276 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
+
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.io.IOException;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.junit.Before;
+import org.junit.Test;
+
+/*
+ * Test class for testing intra-queue preemption when the fair ordering policy
+ * is enabled on a capacity queue.
+ */
+public class TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering
+    extends ProportionalCapacityPreemptionPolicyMockFramework {
+  @Before
+  public void setup() {
+    super.setup();
+    conf.setBoolean(
+        CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, true);
+    policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
+  }
+
+  /*
+   * When the capacity scheduler fair ordering policy is enabled, preempt first
+   * from the application owned by the user that is the farthest over their
+   * user limit.
+   */
+  @Test
+  public void testIntraQueuePreemptionFairOrderingPolicyEnabledOneAppPerUser()
+      throws IOException {
+    // Enable FairOrderingPolicy for yarn.scheduler.capacity.root.a
+    conf.set(CapacitySchedulerConfiguration.PREFIX
+        + CapacitySchedulerConfiguration.ROOT + ".a.ordering-policy", "fair");
+    // Make sure all containers will be preempted in a single round.
+    conf.setFloat(CapacitySchedulerConfiguration.
+        INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT,
+        (float) 1.0);
+
+    String labelsConfig = "=100,true;";
+    String nodesConfig = // n1 has no label
+        "n1= res=100";
+    String queuesConfig =
+        // guaranteed,max,used,pending,reserved
+        "root(=[100 100 100 1 0]);" + // root
+            "-a(=[100 100 100 1 0])"; // a
+
+    // user1/app1 has 60 resources in queue a
+    // user2/app2 has 40 resources in queue a
+    // user3/app3 is requesting 20 resources in queue a
+    // With 3 users, preemptable user limit should be around 35 resources each.
+    // With FairOrderingPolicy enabled on queue a, all 20 resources should be
+    // preempted from app1
+    String appsConfig =
+    // queueName\t(prio,resource,host,expression,#repeat,reserved,pending,user)
+        "a\t" // app1, user1 in a
+            + "(1,1,n1,,60,false,0,user1);" +
+            "a\t" // app2, user2 in a
+            + "(1,1,n1,,40,false,0,user2);" +
+            "a\t" // app3, user3 in a
+            + "(1,1,n1,,0,false,20,user3)"
+            ;
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    verify(mDisp, times(20)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+  }
+
+  /*
+   * When the capacity scheduler fifo ordering policy is enabled, preempt first
+   * from the youngest application until reduced to user limit, then preempt
+   * from next youngest app.
+   */
+  @Test
+  public void testIntraQueuePreemptionFifoOrderingPolicyEnabled()
+      throws IOException {
+    // Enable FifoOrderingPolicy for yarn.scheduler.capacity.root.a
+    conf.set(CapacitySchedulerConfiguration.PREFIX
+        + CapacitySchedulerConfiguration.ROOT + ".a.ordering-policy", "fifo");
+    // Make sure all containers will be preempted in a single round.
+    conf.setFloat(CapacitySchedulerConfiguration.
+        INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT,
+        (float) 1.0);
+
+    String labelsConfig = "=100,true;";
+    String nodesConfig = // n1 has no label
+        "n1= res=100";
+    String queuesConfig =
+        // guaranteed,max,used,pending,reserved
+        "root(=[100 100 100 1 0]);" + // root
+            "-a(=[100 100 100 1 0])"; // a
+
+    // user1/app1 has 60 resources in queue a
+    // user2/app2 has 40 resources in queue a
+    // user3/app3 is requesting 20 resources in queue a
+    // With 3 users, preemptable user limit should be around 35 resources each.
+    // With FifoOrderingPolicy enabled on queue a, the first 5 should come from
+    // the youngest app, app2, until app2 is reduced to the user limit of 35.
+    String appsConfig =
+    // queueName\t(prio,resource,host,expression,#repeat,reserved,pending,user)
+        "a\t" // app1, user1 in a
+            + "(1,1,n1,,60,false,0,user1);" +
+            "a\t" // app2, user2 in a
+            + "(1,1,n1,,40,false,0,user2);" +
+            "a\t" // app3, user3 in a
+            + "(1,1,n1,,0,false,5,user3)"
+            ;
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    verify(mDisp, times(5)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(2))));
+
+    // user1/app1 has 60 resources in queue a
+    // user2/app2 has 35 resources in queue a
+    // user3/app3 has 5 resources and is requesting 15 resources in queue a
+    // With 3 users, preemptable user limit should be around 35 resources each.
+    // The next 15 should come from app1 even though app2 is younger since app2
+    // has already been reduced to its user limit.
+    appsConfig =
+    // queueName\t(prio,resource,host,expression,#repeat,reserved,pending,user)
+        "a\t" // app1, user1 in a
+            + "(1,1,n1,,60,false,0,user1);" +
+            "a\t" // app2, user2 in a
+            + "(1,1,n1,,35,false,0,user2);" +
+            "a\t" // app3, user3 in a
+            + "(1,1,n1,,5,false,15,user3)"
+            ;
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    verify(mDisp, times(15)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+  }
+
+  /*
+   * When the capacity scheduler fair ordering policy is enabled, preempt first
+   * from the youngest application of the user that is farthest over their
+   * user limit.
+   */
+  @Test
+  public void testIntraQueuePreemptionFairOrderingPolicyMultipleAppsPerUser()
+      throws IOException {
+    // Enable FairOrderingPolicy for yarn.scheduler.capacity.root.a
+    conf.set(CapacitySchedulerConfiguration.PREFIX
+        + CapacitySchedulerConfiguration.ROOT + ".a.ordering-policy", "fair");
+    // Make sure all containers will be preempted in a single round.
+    conf.setFloat(CapacitySchedulerConfiguration.
+        INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT,
+        (float) 1.0);
+
+    String labelsConfig = "=100,true;";
+    String nodesConfig = // n1 has no label
+        "n1= res=100";
+    String queuesConfig =
+        // guaranteed,max,used,pending,reserved
+        "root(=[100 100 100 1 0]);" + // root
+            "-a(=[100 100 100 1 0])"; // a
+
+    // user1/app1 has 35 resources in queue a
+    // user1/app2 has 25 resources in queue a
+    // user2/app3 has 40 resources in queue a
+    // user3/app4 is requesting 20 resources in queue a
+    // With 3 users, preemptable user limit should be around 35 resources each.
+    // With FairOrderingPolicy enabled on queue a, all 20 resources should be
+    // preempted from app1 since it's the most over-served app from the most
+    // over-served user
+    String appsConfig =
+    // queueName\t(prio,resource,host,expression,#repeat,reserved,pending,user)
+        "a\t" // app1 and app2, user1 in a
+            + "(1,1,n1,,35,false,0,user1);" +
+            "a\t"
+            + "(1,1,n1,,25,false,0,user1);" +
+            "a\t" // app3, user2 in a
+            + "(1,1,n1,,40,false,0,user2);" +
+            "a\t" // app4, user3 in a
+            + "(1,1,n1,,0,false,20,user3)"
+            ;
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    verify(mDisp, times(20)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+  }
+
+  /*
+   * When the capacity scheduler fifo ordering policy is enabled and a user has
+   * multiple apps, preempt first from the youngest application.
+   */
+  @Test
+  public void testIntraQueuePreemptionFifoOrderingPolicyMultipleAppsPerUser()
+      throws IOException {
+    // Enable FifoOrderingPolicy for yarn.scheduler.capacity.root.a
+    conf.set(CapacitySchedulerConfiguration.PREFIX
+        + CapacitySchedulerConfiguration.ROOT + ".a.ordering-policy", "fifo");
+    // Make sure all containers will be preempted in a single round.
+    conf.setFloat(CapacitySchedulerConfiguration.
+        INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT,
+        (float) 1.0);
+
+    String labelsConfig = "=100,true;";
+    String nodesConfig = // n1 has no label
+        "n1= res=100";
+    String queuesConfig =
+        // guaranteed,max,used,pending,reserved
+        "root(=[100 100 100 1 0]);" + // root
+            "-a(=[100 100 100 1 0])"; // a
+
+    // user1/app1 has 40 resources in queue a
+    // user1/app2 has 20 resources in queue a
+    // user3/app3 has 40 resources in queue a
+    // user4/app4 is requesting 25 resources in queue a
+    // With 3 users, preemptable user limit should be around 35 resources each.
+    String appsConfig =
+    // queueName\t(prio,resource,host,expression,#repeat,reserved,pending,user)
+        "a\t" // app1, user1 in a
+            + "(1,1,n1,,40,false,0,user1);" +
+        "a\t" // app2, user1 in a
+            + "(1,1,n1,,20,false,0,user1);" +
+        "a\t" // app3, user3 in a
+            + "(1,1,n1,,40,false,0,user3);" +
+        "a\t" // app4, user4 in a
+            + "(1,1,n1,,0,false,25,user4)"
+            ;
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    // app3 is the youngest and also over its user limit. 5 should be preempted
+    // from app3 until it comes down to user3's user limit.
+    verify(mDisp, times(5)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(3))));
+
+    // User1's app2 is its youngest. 19 should be preempted from app2, leaving
+    // only the AM
+    verify(mDisp, times(19)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(2))));
+
+    // Preempt the remaining resource from User1's oldest app1.
+    verify(mDisp, times(1)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+  }
+}
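The recurring "around 35 resources each" figure in these test comments follows from an even split of the fully used queue among its three active users. The back-of-the-envelope sketch below is hypothetical; the real CapacityScheduler user-limit computation also factors in minimum-user-limit-percent, user-limit-factor, the active-user set, and AM usage.

public class UserLimitSketch {
  public static void main(String[] args) {
    long queueUsed = 100;                      // queue a is fully used
    long activeUsers = 3;                      // e.g. user1, user2, user3
    long evenShare = queueUsed / activeUsers;  // 33 -> "around 35"
    long[] usedByUser = {60, 40, 0};
    for (int i = 0; i < usedByUser.length; i++) {
      long over = Math.max(0, usedByUser[i] - evenShare);
      System.out.println("user" + (i + 1) + " is over the even share by " + over);
    }
  }
}

Users most over that share are the first candidates for preemption, which is why app1 (user1, 60 used) loses containers before app2 in the fair-ordering tests above.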




[49/50] [abbrv] hadoop git commit: HDDS-5. Enable OzoneManager kerberos auth. Contributed by Ajay Kumar.

Posted by xy...@apache.org.
HDDS-5. Enable OzoneManager kerberos auth. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b28e6464
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b28e6464
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b28e6464

Branch: refs/heads/HDDS-4
Commit: b28e64646d462c8a9736edf74b28d9ff8c4f9982
Parents: 9086e1f
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Mon May 14 09:36:57 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu May 31 08:49:34 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |   4 +
 .../common/src/main/resources/ozone-default.xml |  33 +++-
 .../apache/hadoop/ozone/ksm/KSMConfigKeys.java  |   5 +
 .../ksm/protocol/KeySpaceManagerProtocol.java   |   4 +
 .../protocolPB/KeySpaceManagerProtocolPB.java   |   5 +
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |   3 +-
 .../ozone/TestOzoneConfigurationFields.java     |   3 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java    | 169 +++++++++++++++----
 .../hadoop/ozone/ksm/KeySpaceManager.java       |  53 +++++-
 .../ozone/ksm/KeySpaceManagerHttpServer.java    |   5 +-
 10 files changed, 238 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28e6464/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index dec2c1c..a12d6ac 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -20,4 +20,8 @@ package org.apache.hadoop.hdds;
 public final class HddsConfigKeys {
   private HddsConfigKeys() {
   }
+  public static final String HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY = "hdds.ksm."
+      + "kerberos.keytab.file";
+  public static final String HDDS_KSM_KERBEROS_PRINCIPAL_KEY = "hdds.ksm"
+      + ".kerberos.principal";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28e6464/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 7012946..9f7fc84 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1099,7 +1099,23 @@
     <name>ozone.scm.kerberos.principal</name>
     <value></value>
     <tag> OZONE, SECURITY</tag>
-    <description>The SCM service principal. Ex scm/_HOST@REALM.TLD.</description>
+    <description>The SCM service principal. Ex scm/_HOST@REALM.COM</description>
+  </property>
+
+  <property>
+    <name>hdds.ksm.kerberos.keytab.file</name>
+    <value></value>
+    <tag> HDDS, SECURITY</tag>
+    <description> The keytab file used by KSM daemon to login as its
+      service principal. The principal name is configured with
+      hdds.ksm.kerberos.principal.
+    </description>
+  </property>
+  <property>
+    <name>hdds.ksm.kerberos.principal</name>
+    <value></value>
+    <tag> HDDS, SECURITY</tag>
+    <description>The KSM service principal. Ex ksm/_HOST@REALM.COM</description>
   </property>
 
   <property>
@@ -1111,4 +1127,19 @@
     <value>/etc/security/keytabs/HTTP.keytab</value>
   </property>
 
+  <property>
+    <name>hdds.ksm.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+    <description>
+      KSM http server kerberos principal.
+    </description>
+  </property>
+  <property>
+    <name>hdds.ksm.web.authentication.kerberos.keytab</name>
+    <value>/etc/security/keytabs/HTTP.keytab</value>
+    <description>
+      KSM http server kerberos keytab.
+    </description>
+  </property>
+
 </configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28e6464/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
index 75cf613..d911bcb 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
@@ -78,4 +78,9 @@ public final class KSMConfigKeys {
   public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
       "ozone.key.deleting.limit.per.task";
   public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
+
+  public static final String KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
+      "hdds.ksm.web.authentication.kerberos.principal";
+  public static final String KSM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE =
+      "hdds.ksm.web.authentication.kerberos.keytab";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28e6464/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
index 54862d3..de27108 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.ksm.protocol;
 
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
 import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
@@ -29,10 +30,13 @@ import org.apache.hadoop.ozone.protocol.proto
     .KeySpaceManagerProtocolProtos.OzoneAclInfo;
 import java.io.IOException;
 import java.util.List;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * Protocol to talk to KSM.
  */
+@KerberosInfo(
+    serverPrincipal = HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY)
 public interface KeySpaceManagerProtocol {
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28e6464/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
index 8acca8a..71b9da0 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
@@ -18,9 +18,12 @@
 package org.apache.hadoop.ozone.ksm.protocolPB;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.protocol.proto
     .KeySpaceManagerProtocolProtos.KeySpaceManagerService;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * Protocol used to communicate with KSM.
@@ -28,6 +31,8 @@ import org.apache.hadoop.ozone.protocol.proto
 @ProtocolInfo(protocolName =
     "org.apache.hadoop.ozone.protocol.KeySpaceManagerProtocol",
     protocolVersion = 1)
+@KerberosInfo(
+    serverPrincipal = HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface KeySpaceManagerProtocolPB
     extends KeySpaceManagerService.BlockingInterface {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28e6464/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index fbd9565..0455e19 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -346,7 +346,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
      *
      * @throws IOException
      */
-    private KeySpaceManager createKSM() throws IOException {
+    private KeySpaceManager createKSM()
+        throws IOException, AuthenticationException {
       configureKSM();
       KSMStorage ksmStore = new KSMStorage(conf);
       ksmStore.setClusterId(clusterId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28e6464/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 533a3b4..a1d3fd0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone;
 
 import org.apache.hadoop.conf.TestConfigurationFieldsBase;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 
@@ -31,7 +32,7 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase {
     xmlFilename = new String("ozone-default.xml");
     configurationClasses =
         new Class[] {OzoneConfigKeys.class, ScmConfigKeys.class,
-            KSMConfigKeys.class};
+            KSMConfigKeys.class, HddsConfigKeys.class};
     errorIfMissingConfigProps = true;
     errorIfMissingXmlProps = true;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28e6464/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 9c430ad..b917dfe 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -26,24 +26,34 @@ import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.Properties;
 import java.util.UUID;
+import java.util.concurrent.Callable;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.ozone.ksm.KSMStorage;
+import org.apache.hadoop.ozone.ksm.KeySpaceManager;
 import org.apache.hadoop.security.KerberosAuthException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -56,13 +66,23 @@ public final class TestSecureOzoneCluster {
   private Logger LOGGER = LoggerFactory
       .getLogger(TestSecureOzoneCluster.class);
 
+  @Rule
+  public Timeout timeout = new Timeout(80000);
+
   private MiniKdc miniKdc;
   private OzoneConfiguration conf;
   private File workDir;
   private static Properties securityProperties;
   private File scmKeytab;
   private File spnegoKeytab;
+  private File ksmKeyTab;
   private String curUser;
+  private StorageContainerManager scm;
+  private KeySpaceManager ksm;
+
+  private static String clusterId;
+  private static String scmId;
+  private static String ksmId;
 
   @Before
   public void init() {
@@ -71,6 +91,10 @@ public final class TestSecureOzoneCluster {
       startMiniKdc();
       setSecureConfig(conf);
       createCredentialsInKDC(conf, miniKdc);
+
+      clusterId = UUID.randomUUID().toString();
+      scmId = UUID.randomUUID().toString();
+      ksmId = UUID.randomUUID().toString();
     } catch (IOException e) {
       LOGGER.error("Failed to initialize TestSecureOzoneCluster", e);
     } catch (Exception e) {
@@ -78,12 +102,30 @@ public final class TestSecureOzoneCluster {
     }
   }
 
+  @After
+  public void stop() {
+    try {
+      stopMiniKdc();
+      if (scm != null) {
+        scm.stop();
+      }
+      if (ksm != null) {
+        ksm.stop();
+      }
+    } catch (Exception e) {
+      LOGGER.error("Failed to stop TestSecureOzoneCluster", e);
+    }
+  }
+
   private void createCredentialsInKDC(Configuration conf, MiniKdc miniKdc)
       throws Exception {
     createPrincipal(scmKeytab,
         conf.get(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY));
     createPrincipal(spnegoKeytab,
-        conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
+        conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY),
+        conf.get(KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL));
+    createPrincipal(ksmKeyTab,
+        conf.get(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY));
   }
 
   private void createPrincipal(File keytab, String... principal)
@@ -99,6 +141,10 @@ public final class TestSecureOzoneCluster {
     miniKdc.start();
   }
 
+  private void stopMiniKdc() throws Exception {
+    miniKdc.stop();
+  }
+
   private void setSecureConfig(Configuration conf) throws IOException {
     conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
     String host = KerberosUtil.getLocalHostName();
@@ -114,59 +160,56 @@ public final class TestSecureOzoneCluster {
     conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
         "HTTP_SCM/" + host + "@" + realm);
 
+    conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY,
+        "ksm/" + host + "@" + realm);
+    conf.set(KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL,
+        "HTTP_KSM/" + host + "@" + realm);
+
     scmKeytab = new File(workDir, "scm.keytab");
     spnegoKeytab = new File(workDir, "http.keytab");
+    ksmKeyTab = new File(workDir, "ksm.keytab");
 
     conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
         scmKeytab.getAbsolutePath());
     conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
         spnegoKeytab.getAbsolutePath());
+    conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY,
+        ksmKeyTab.getAbsolutePath());
 
   }
 
   @Test
   public void testSecureScmStartupSuccess() throws Exception {
+
+    initSCM();
+    scm = StorageContainerManager.createSCM(null, conf);
+    //Reads the SCM Info from SCM instance
+    ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
+    Assert.assertEquals(clusterId, scmInfo.getClusterId());
+    Assert.assertEquals(scmId, scmInfo.getScmId());
+  }
+
+  private void initSCM()
+      throws IOException, AuthenticationException {
     final String path = GenericTestUtils
         .getTempPath(UUID.randomUUID().toString());
     Path scmPath = Paths.get(path, "scm-meta");
     conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
     conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
     SCMStorage scmStore = new SCMStorage(conf);
-    String clusterId = UUID.randomUUID().toString();
-    String scmId = UUID.randomUUID().toString();
     scmStore.setClusterId(clusterId);
     scmStore.setScmId(scmId);
     // writes the version file properties
     scmStore.initialize();
-    StorageContainerManager scm = StorageContainerManager.createSCM(null, conf);
-    //Reads the SCM Info from SCM instance
-    ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
-    Assert.assertEquals(clusterId, scmInfo.getClusterId());
-    Assert.assertEquals(scmId, scmInfo.getScmId());
   }
 
   @Test
   public void testSecureScmStartupFailure() throws Exception {
-    final String path = GenericTestUtils
-        .getTempPath(UUID.randomUUID().toString());
-    Path scmPath = Paths.get(path, "scm-meta");
-
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
-    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
-    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
-    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
-        "scm@" + miniKdc.getRealm());
+    initSCM();
+    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
         "kerberos");
 
-    SCMStorage scmStore = new SCMStorage(conf);
-    String clusterId = UUID.randomUUID().toString();
-    String scmId = UUID.randomUUID().toString();
-    scmStore.setClusterId(clusterId);
-    scmStore.setScmId(scmId);
-    // writes the version file properties
-    scmStore.initialize();
     LambdaTestUtils.intercept(IOException.class,
         "Running in secure mode, but config doesn't have a keytab",
         () -> {
@@ -178,28 +221,82 @@ public final class TestSecureOzoneCluster {
     conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
         "/etc/security/keytabs/scm.keytab");
 
+    testCommonKerberosFailures(
+        () -> StorageContainerManager.createSCM(null, conf));
+
+  }
+
+  private void testCommonKerberosFailures(Callable callable) throws Exception {
     LambdaTestUtils.intercept(KerberosAuthException.class, "failure "
-            + "to login: for principal:",
-        () -> {
-          StorageContainerManager.createSCM(null, conf);
-        });
+        + "to login: for principal:", callable);
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
         "OAuth2");
 
     LambdaTestUtils.intercept(IllegalArgumentException.class, "Invalid"
             + " attribute value for hadoop.security.authentication of OAuth2",
-        () -> {
-          StorageContainerManager.createSCM(null, conf);
-        });
+        callable);
 
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
         "KERBEROS_SSL");
     LambdaTestUtils.intercept(AuthenticationException.class,
-        "KERBEROS_SSL authentication method not support.",
-        () -> {
-          StorageContainerManager.createSCM(null, conf);
-        });
+        "KERBEROS_SSL authentication method not",
+        callable);
+  }
 
+  /**
+   * Tests the secure KSM Initialization Failure.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testSecureKsmInitializationFailure() throws Exception {
+    initSCM();
+    // Create a secure SCM instance as ksm client will connect to it
+    scm = StorageContainerManager.createSCM(null, conf);
+
+    final String path = GenericTestUtils
+        .getTempPath(UUID.randomUUID().toString());
+    KSMStorage ksmStore = new KSMStorage(conf);
+    ksmStore.setClusterId("testClusterId");
+    ksmStore.setScmId("testScmId");
+    // writes the version file properties
+    ksmStore.initialize();
+    conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY,
+        "non-existent-user@EXAMPLE.com");
+    testCommonKerberosFailures(() -> KeySpaceManager.createKSM(null, conf));
+  }
+
+  /**
+   * Tests the secure KSM Initialization success.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testSecureKsmInitializationSuccess() throws Exception {
+    initSCM();
+    // Create a secure SCM instance as ksm client will connect to it
+    scm = StorageContainerManager.createSCM(null, conf);
+    LogCapturer logs = LogCapturer.captureLogs(KeySpaceManager.LOG);
+    GenericTestUtils
+        .setLogLevel(LoggerFactory.getLogger(KeySpaceManager.class.getName()),
+            org.slf4j.event.Level.INFO);
+
+    final String path = GenericTestUtils
+        .getTempPath(UUID.randomUUID().toString());
+    Path metaDirPath = Paths.get(path, "ksm-meta");
+
+    KSMStorage ksmStore = new KSMStorage(conf);
+    ksmStore.setClusterId("testClusterId");
+    ksmStore.setScmId("testScmId");
+    // writes the version file properties
+    ksmStore.initialize();
+    try {
+      ksm = KeySpaceManager.createKSM(null, conf);
+    } catch (Exception ex) {
+      // Expects timeout failure from scmClient in KSM but KSM user login via
+      // kerberos should succeed
+      Assert.assertTrue(logs.getOutput().contains("KSM login successful"));
+    }
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28e6464/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
index dc8fc91..be747d2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
 import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
 import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
@@ -60,7 +61,10 @@ import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.StringUtils;
 
@@ -84,6 +88,8 @@ import java.util.List;
 import java.util.Map;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
     .OZONE_KSM_ADDRESS_KEY;
 import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
@@ -103,7 +109,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
 @InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
 public final class KeySpaceManager extends ServiceRuntimeInfoImpl
     implements KeySpaceManagerProtocol, KSMMXBean {
-  private static final Logger LOG =
+  public static final Logger LOG =
       LoggerFactory.getLogger(KeySpaceManager.class);
 
   private static final String USAGE =
@@ -154,8 +160,8 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
   private KeySpaceManager(OzoneConfiguration conf) throws IOException {
     Preconditions.checkNotNull(conf);
     configuration = conf;
+
     ksmStorage = new KSMStorage(conf);
-    scmBlockClient = getScmBlockClient(configuration);
     scmContainerClient = getScmContainerClient(configuration);
     if (ksmStorage.getState() != StorageState.INITIALIZED) {
       throw new KSMException("KSM not initialized.",
@@ -163,6 +169,7 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
     }
 
     // verifies that the SCM info in the KSM Version file is correct.
+    scmBlockClient = getScmBlockClient(configuration);
     ScmInfo scmInfo = scmBlockClient.getScmInfo();
     if (!(scmInfo.getClusterId().equals(ksmStorage.getClusterID()) && scmInfo
         .getScmId().equals(ksmStorage.getScmId()))) {
@@ -195,6 +202,34 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
   }
 
   /**
+   * Login KSM service user if security and Kerberos are enabled.
+   *
+   * @param  conf
+   * @throws IOException, AuthenticationException
+   */
+  private static void loginKSMUser(OzoneConfiguration conf)
+      throws IOException, AuthenticationException {
+
+    if (SecurityUtil.getAuthenticationMethod(conf).equals
+        (AuthenticationMethod.KERBEROS)) {
+      LOG.debug("Ozone security is enabled. Attempting login for KSM user. "
+              + "Principal: {},keytab: {}", conf.get(HDDS_KSM_KERBEROS_PRINCIPAL_KEY),
+          conf.get(HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY));
+
+      UserGroupInformation.setConfiguration(conf);
+
+      InetSocketAddress socAddr = getKsmAddress(conf);
+      SecurityUtil.login(conf, HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY,
+          HDDS_KSM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
+    } else {
+      throw new AuthenticationException(SecurityUtil.getAuthenticationMethod
+          (conf) + " authentication method not supported. KSM user login "
+          + "failed.");
+    }
+    LOG.info("KSM login successful.");
+  }
+
+  /**
    * Create a scm block client, used by putKey() and getKey().
    *
    * @return {@link ScmBlockLocationProtocol}
@@ -338,7 +373,7 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
    */
 
   public static KeySpaceManager createKSM(String[] argv,
-      OzoneConfiguration conf) throws IOException {
+      OzoneConfiguration conf) throws IOException, AuthenticationException {
     if (!isHddsEnabled(conf)) {
       System.err.println("KSM cannot be started in secure mode or when " +
           OZONE_ENABLED + " is set to false");
@@ -350,6 +385,10 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
       terminate(1);
       return null;
     }
+    // Authenticate KSM if security is enabled
+    if (conf.getBoolean(OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY, true)) {
+      loginKSMUser(conf);
+    }
     switch (startOpt) {
     case CREATEOBJECTSTORE:
       terminate(ksmInit(conf) ? 0 : 1);
@@ -444,7 +483,13 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
     metadataManager.start();
     keyManager.start();
     ksmRpcServer.start();
-    httpServer.start();
+    try {
+      httpServer.start();
+    } catch (Exception ex) {
+      // Allow KSM to start as Http Server failure is not fatal.
+      LOG.error("KSM HttpServer failed to start.", ex);
+    }
+
     registerMXBean();
     setStartTime();
   }
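As a hedged illustration of what SecurityUtil.login() does for loginKSMUser() above: it reads the keytab path and principal from the two config keys and substitutes _HOST in the principal with the supplied hostname before performing the Kerberos login. The keytab path and hostname below are placeholders; actually running this requires a reachable KDC and a valid keytab.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY;

public class KsmLoginSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // _HOST is replaced with the hostname passed to SecurityUtil.login().
    conf.set(HDDS_KSM_KERBEROS_PRINCIPAL_KEY, "ksm/_HOST@EXAMPLE.COM");
    conf.set(HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY,
        "/etc/security/keytabs/ksm.keytab");
    UserGroupInformation.setConfiguration(conf);
    SecurityUtil.login(conf, HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY,
        HDDS_KSM_KERBEROS_PRINCIPAL_KEY, "ksm1.example.com");
    System.out.println("Logged in as "
        + UserGroupInformation.getLoginUser().getUserName());
  }
}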

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28e6464/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
index 478804b..a0d15b3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.ksm;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.server.BaseHttpServer;
 
@@ -65,11 +64,11 @@ public class KeySpaceManagerHttpServer extends BaseHttpServer {
   }
 
   @Override protected String getKeytabFile() {
-    return KSMConfigKeys.OZONE_KSM_KEYTAB_FILE;
+    return KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE;
   }
 
   @Override protected String getSpnegoPrincipal() {
-    return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
+    return KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
   }
 
   @Override protected String getEnabledKey() {




[32/50] [abbrv] hadoop git commit: HADOOP-15480 AbstractS3GuardToolTestBase.testDiffCommand fails when using dynamo (Gabor Bota)

Posted by xy...@apache.org.
HADOOP-15480 AbstractS3GuardToolTestBase.testDiffCommand fails when using dynamo (Gabor Bota)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f6769f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f6769f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f6769f7

Branch: refs/heads/HDDS-4
Commit: 5f6769f7964ff002b6c04a95893b5baeb424b6db
Parents: 135941e
Author: Aaron Fabbri <fa...@apache.org>
Authored: Tue May 29 19:20:22 2018 -0700
Committer: Aaron Fabbri <fa...@apache.org>
Committed: Tue May 29 19:20:22 2018 -0700

----------------------------------------------------------------------
 .../s3guard/AbstractS3GuardToolTestBase.java    | 37 +++++++++++++-------
 .../s3a/s3guard/ITestS3GuardToolDynamoDB.java   |  5 ---
 .../fs/s3a/s3guard/ITestS3GuardToolLocal.java   |  5 ---
 3 files changed, 25 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6769f7/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
index 2b43810..7d75f52 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
@@ -25,6 +25,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.PrintStream;
+import java.net.URI;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
@@ -32,6 +33,8 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.util.StopWatch;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.FileSystem;
 import org.junit.Assume;
 import org.junit.Test;
 
@@ -48,6 +51,8 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.StringUtils;
 
+import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_METASTORE_NULL;
+import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.E_BAD_STATE;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.SUCCESS;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
@@ -65,6 +70,7 @@ public abstract class AbstractS3GuardToolTestBase extends AbstractS3ATestBase {
   private static final int PRUNE_MAX_AGE_SECS = 2;
 
   private MetadataStore ms;
+  private S3AFileSystem rawFs;
 
   protected static void expectResult(int expected,
       String message,
@@ -129,28 +135,34 @@ public abstract class AbstractS3GuardToolTestBase extends AbstractS3ATestBase {
     return ms;
   }
 
-  protected abstract MetadataStore newMetadataStore();
-
   @Override
   public void setup() throws Exception {
     super.setup();
     S3ATestUtils.assumeS3GuardState(true, getConfiguration());
-    ms = newMetadataStore();
-    ms.initialize(getFileSystem());
+    ms = getFileSystem().getMetadataStore();
+
+    // Also create a "raw" fs without any MetadataStore configured
+    Configuration conf = new Configuration(getConfiguration());
+    conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL);
+    URI fsUri = getFileSystem().getUri();
+    rawFs = (S3AFileSystem) FileSystem.newInstance(fsUri, conf);
   }
 
   @Override
   public void teardown() throws Exception {
     super.teardown();
     IOUtils.cleanupWithLogger(LOG, ms);
+    IOUtils.closeStream(rawFs);
   }
 
   protected void mkdirs(Path path, boolean onS3, boolean onMetadataStore)
       throws IOException {
+    Preconditions.checkArgument(onS3 || onMetadataStore);
+    // getFileSystem() returns an fs with MetadataStore configured
+    S3AFileSystem fs = onMetadataStore ? getFileSystem() : rawFs;
     if (onS3) {
-      getFileSystem().mkdirs(path);
-    }
-    if (onMetadataStore) {
+      fs.mkdirs(path);
+    } else if (onMetadataStore) {
       S3AFileStatus status = new S3AFileStatus(true, path, OWNER);
       ms.put(new PathMetadata(status));
     }
@@ -178,13 +190,14 @@ public abstract class AbstractS3GuardToolTestBase extends AbstractS3ATestBase {
    */
   protected void createFile(Path path, boolean onS3, boolean onMetadataStore)
       throws IOException {
+    Preconditions.checkArgument(onS3 || onMetadataStore);
+    // getFileSystem() returns an fs with MetadataStore configured
+    S3AFileSystem fs = onMetadataStore ? getFileSystem() : rawFs;
     if (onS3) {
-      ContractTestUtils.touch(getFileSystem(), path);
-    }
-
-    if (onMetadataStore) {
+      ContractTestUtils.touch(fs, path);
+    } else if (onMetadataStore) {
       S3AFileStatus status = new S3AFileStatus(100L, System.currentTimeMillis(),
-          getFileSystem().qualify(path), 512L, "hdfs");
+          fs.qualify(path), 512L, "hdfs");
       putFile(ms, status);
     }
   }
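The easy-to-miss design choice in the setup() change above is FileSystem.newInstance() rather than FileSystem.get(): get() would consult the FS cache and hand back the already-created, S3Guard-enabled instance. A minimal sketch of the pattern, assuming it runs inside the test base where getConfiguration() and getFileSystem() are available:

// Build a second, "raw" view of the same bucket with S3Guard disabled.
Configuration conf = new Configuration(getConfiguration());
conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL);
S3AFileSystem rawFs =
    (S3AFileSystem) FileSystem.newInstance(getFileSystem().getUri(), conf);
// rawFs now talks straight to S3, while getFileSystem() still goes
// through the MetadataStore, so tests can create deliberate
// S3-vs-metastore inconsistencies and diff them.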

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6769f7/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
index 821bba5..1a59bf1 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
@@ -48,11 +48,6 @@ import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.*;
 public class ITestS3GuardToolDynamoDB extends AbstractS3GuardToolTestBase {
 
   @Override
-  protected MetadataStore newMetadataStore() {
-    return new DynamoDBMetadataStore();
-  }
-
-  @Override
   public void setup() throws Exception {
     super.setup();
     Assume.assumeTrue("Test only applies when DynamoDB is used for S3Guard",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6769f7/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolLocal.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolLocal.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolLocal.java
index a40c7a5..f5c4b03 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolLocal.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolLocal.java
@@ -52,11 +52,6 @@ public class ITestS3GuardToolLocal extends AbstractS3GuardToolTestBase {
   private static final String[] ABORT_FORCE_OPTIONS = new String[] {"-abort",
       "-force", "-verbose"};
 
-  @Override
-  protected MetadataStore newMetadataStore() {
-    return new LocalMetadataStore();
-  }
-
   @Test
   public void testImportCommand() throws Exception {
     S3AFileSystem fs = getFileSystem();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[07/50] [abbrv] hadoop git commit: HDFS-13620. Randomize the test directory path for TestHDFSFileSystemContract. Contributed by Anbang Hu.

Posted by xy...@apache.org.
HDFS-13620. Randomize the test directory path for TestHDFSFileSystemContract. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8605a385
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8605a385
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8605a385

Branch: refs/heads/HDDS-4
Commit: 8605a38514b4f7a2a549c7ecf8e1421e61bb4d67
Parents: 2a9652e
Author: Inigo Goiri <in...@apache.org>
Authored: Fri May 25 19:43:33 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri May 25 19:43:33 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java     | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8605a385/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
index 50d1e75..6da46de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -25,6 +26,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -39,7 +41,9 @@ public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
     Configuration conf = new HdfsConfiguration();
     conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
         FileSystemContractBaseTest.TEST_UMASK);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    File basedir = GenericTestUtils.getRandomizedTestDir();
+    cluster = new MiniDFSCluster.Builder(conf, basedir).numDataNodes(2)
+        .build();
     fs = cluster.getFileSystem();
     defaultWorkingDirectory = "/user/" + 
            UserGroupInformation.getCurrentUser().getShortUserName();
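
A minimal standalone version of the same pattern (the class name and test
body are illustrative; the builder overload and randomized-dir helper are
the ones used in the patch):

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.test.GenericTestUtils;

    // Give each run its own MiniDFSCluster storage root so parallel or
    // leftover runs cannot collide on the default base directory.
    public class RandomizedMiniClusterExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        File basedir = GenericTestUtils.getRandomizedTestDir();
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf, basedir).numDataNodes(2).build();
        try {
          cluster.waitActive();
          // ... run assertions against cluster.getFileSystem() ...
        } finally {
          cluster.shutdown();
        }
      }
    }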


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[44/50] [abbrv] hadoop git commit: HDFS-13582. Improve backward compatibility for HDFS-13176 (WebHdfs file path gets truncated when having semicolon (;) inside). Contributed by Zsolt Venczel.

Posted by xy...@apache.org.
HDFS-13582. Improve backward compatibility for HDFS-13176 (WebHdfs file path gets truncated when having semicolon (;) inside). Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1361030e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1361030e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1361030e

Branch: refs/heads/HDDS-4
Commit: 1361030e59d7557a2bffac0ea8df116ce2eaae4a
Parents: 6bc92e3
Author: Sean Mackrory <ma...@apache.org>
Authored: Thu May 31 07:56:57 2018 -0600
Committer: Sean Mackrory <ma...@apache.org>
Committed: Thu May 31 07:59:21 2018 -0600

----------------------------------------------------------------------
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      | 11 +++-
 .../apache/hadoop/hdfs/web/TestWebHdfsUrl.java  | 55 ++++++++++++++++++++
 2 files changed, 65 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1361030e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index f7deab9..673acd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -146,6 +146,8 @@ public class WebHdfsFileSystem extends FileSystem
   public static final String EZ_HEADER = "X-Hadoop-Accept-EZ";
   public static final String FEFINFO_HEADER = "X-Hadoop-feInfo";
 
+  public static final String SPECIAL_FILENAME_CHARACTERS_REGEX = ".*[;+%].*";
+
   /**
    * Default connection factory may be overridden in tests to use smaller
    * timeout values
@@ -606,8 +608,10 @@ public class WebHdfsFileSystem extends FileSystem
     if (fspath != null) {
       URI fspathUri = fspath.toUri();
       String fspathUriDecoded = fspathUri.getPath();
+      boolean pathAlreadyEncoded = false;
       try {
         fspathUriDecoded = URLDecoder.decode(fspathUri.getPath(), "UTF-8");
+        pathAlreadyEncoded = true;
       } catch (IllegalArgumentException ex) {
         LOG.trace("Cannot decode URL encoded file", ex);
       }
@@ -617,7 +621,12 @@ public class WebHdfsFileSystem extends FileSystem
         StringBuilder fsPathEncodedItems = new StringBuilder();
         for (String fsPathItem : fspathItems) {
           fsPathEncodedItems.append("/");
-          fsPathEncodedItems.append(URLEncoder.encode(fsPathItem, "UTF-8"));
+          if (fsPathItem.matches(SPECIAL_FILENAME_CHARACTERS_REGEX) ||
+              pathAlreadyEncoded) {
+            fsPathEncodedItems.append(URLEncoder.encode(fsPathItem, "UTF-8"));
+          } else {
+            fsPathEncodedItems.append(fsPathItem);
+          }
         }
         encodedFSPath = new Path(fspathUri.getScheme(),
                 fspathUri.getAuthority(), fsPathEncodedItems.substring(1));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1361030e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
index ecd53f6..02a68ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
@@ -414,4 +414,59 @@ public class TestWebHdfsUrl {
     }
   }
 
+  private static final String BACKWARD_COMPATIBLE_SPECIAL_CHARACTER_FILENAME =
+          "specialFile ?\"\\()[]_-=&,{}#'`~!@$^*|<>.";
+
+  @Test
+  public void testWebHdfsBackwardCompatibleSpecialCharacterFile()
+          throws Exception {
+
+    assertFalse(BACKWARD_COMPATIBLE_SPECIAL_CHARACTER_FILENAME
+            .matches(WebHdfsFileSystem.SPECIAL_FILENAME_CHARACTERS_REGEX));
+
+    UserGroupInformation ugi =
+            UserGroupInformation.createRemoteUser("test-user");
+    ugi.setAuthenticationMethod(KERBEROS);
+    UserGroupInformation.setLoginUser(ugi);
+
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final Path dir = new Path("/testWebHdfsSpecialCharacterFile");
+
+    final short numDatanodes = 1;
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+            .numDataNodes(numDatanodes)
+            .build();
+    try {
+      cluster.waitActive();
+      final FileSystem fs = WebHdfsTestUtil
+              .getWebHdfsFileSystem(conf, WebHdfs.SCHEME);
+
+      //create a file
+      final long length = 1L << 10;
+      final Path file1 = new Path(dir,
+              BACKWARD_COMPATIBLE_SPECIAL_CHARACTER_FILENAME);
+
+      DFSTestUtil.createFile(fs, file1, length, numDatanodes, 20120406L);
+
+      //get file status and check that it was written properly.
+      final FileStatus s1 = fs.getFileStatus(file1);
+      assertEquals("Write failed for file " + file1, length, s1.getLen());
+
+      boolean found = false;
+      RemoteIterator<LocatedFileStatus> statusRemoteIterator =
+              fs.listFiles(dir, false);
+      while (statusRemoteIterator.hasNext()) {
+        LocatedFileStatus locatedFileStatus = statusRemoteIterator.next();
+        if (locatedFileStatus.isFile() &&
+                BACKWARD_COMPATIBLE_SPECIAL_CHARACTER_FILENAME
+                        .equals(locatedFileStatus.getPath().getName())) {
+          found = true;
+        }
+      }
+      assertFalse("Could not find file with special character", !found);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
 }
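
The encoding rule added above, isolated as a sketch. The regex is copied
from the patch; the standalone class and method are illustrative rather
than the actual WebHdfsFileSystem code:

    import java.io.UnsupportedEncodingException;
    import java.net.URLDecoder;
    import java.net.URLEncoder;

    public final class PathEncodeSketch {
      static final String SPECIAL = ".*[;+%].*"; // same regex as the patch

      // A path element is URL-encoded only when it contains ';', '+' or
      // '%', or when decoding the original path succeeded; otherwise it is
      // passed through unchanged.
      static String encode(String rawPath)
          throws UnsupportedEncodingException {
        String decoded = rawPath;
        boolean alreadyEncoded = false;
        try {
          decoded = URLDecoder.decode(rawPath, "UTF-8");
          alreadyEncoded = true;
        } catch (IllegalArgumentException e) {
          // malformed % sequence: keep the raw form
        }
        StringBuilder out = new StringBuilder();
        for (String item : decoded.split("/")) {
          out.append("/");
          out.append(item.matches(SPECIAL) || alreadyEncoded
              ? URLEncoder.encode(item, "UTF-8") : item);
        }
        return out.substring(1);
      }
    }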


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[36/50] [abbrv] hadoop git commit: HDFS-13632. Randomize baseDir for MiniJournalCluster in MiniQJMHACluster for TestDFSAdminWithHA. Contributed by Anbang Hu.

Posted by xy...@apache.org.
HDFS-13632. Randomize baseDir for MiniJournalCluster in MiniQJMHACluster for TestDFSAdminWithHA. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8197b9b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8197b9b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8197b9b5

Branch: refs/heads/HDDS-4
Commit: 8197b9b56040113806bdf328bbee68e95dd0aadd
Parents: e44c084
Author: Inigo Goiri <in...@apache.org>
Authored: Wed May 30 10:02:19 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Wed May 30 10:13:52 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java | 10 ++++++++--
 .../org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java  |  4 +++-
 2 files changed, 11 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8197b9b5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
index 1005f7f..f1f74dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
@@ -49,6 +49,7 @@ public class MiniQJMHACluster {
     private int numNNs = 2;
     private final MiniDFSCluster.Builder dfsBuilder;
     private boolean forceRemoteEditsOnly = false;
+    private String baseDir;
 
     public Builder(Configuration conf) {
       this.conf = conf;
@@ -69,6 +70,11 @@ public class MiniQJMHACluster {
       this.startOpt = startOpt;
     }
 
+    public Builder baseDir(String d) {
+      this.baseDir = d;
+      return this;
+    }
+
     public Builder setNumNameNodes(int nns) {
       this.numNNs = nns;
       return this;
@@ -104,8 +110,8 @@ public class MiniQJMHACluster {
         basePort = 10000 + RANDOM.nextInt(1000) * 4;
         LOG.info("Set MiniQJMHACluster basePort to " + basePort);
         // start 3 journal nodes
-        journalCluster = new MiniJournalCluster.Builder(conf).format(true)
-            .build();
+        journalCluster = new MiniJournalCluster.Builder(conf)
+            .baseDir(builder.baseDir).format(true).build();
         journalCluster.waitActive();
         journalCluster.setNamenodeSharedEditsConf(NAMESERVICE);
         URI journalURI = journalCluster.getQuorumJournalURI(NAMESERVICE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8197b9b5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index b21084e..aa4d481 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Test;
 
@@ -85,7 +86,8 @@ public class TestDFSAdminWithHA {
     conf = new Configuration();
     conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
         security);
-    cluster = new MiniQJMHACluster.Builder(conf).build();
+    String baseDir = GenericTestUtils.getRandomizedTempPath();
+    cluster = new MiniQJMHACluster.Builder(conf).baseDir(baseDir).build();
     setHAConf(conf, cluster.getDfsCluster().getNameNode(0).getHostAndPort(),
         cluster.getDfsCluster().getNameNode(1).getHostAndPort());
     cluster.getDfsCluster().getNameNode(0).getHostAndPort();
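
The new hook in isolation (the setup around it is illustrative; the
builder method and temp-path helper are the ones from the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
    import org.apache.hadoop.test.GenericTestUtils;

    // Point the embedded MiniJournalCluster at a per-run directory so
    // parallel test executions do not share journal storage.
    public class RandomizedQjmHaExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf)
            .baseDir(GenericTestUtils.getRandomizedTempPath())
            .build();
        try {
          cluster.getDfsCluster().waitActive();
          // ... exercise DFSAdmin/HA operations here ...
        } finally {
          cluster.shutdown();
        }
      }
    }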


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[10/50] [abbrv] hadoop git commit: HADOOP-15477. Make unjar in RunJar overrideable

Posted by xy...@apache.org.
HADOOP-15477. Make unjar in RunJar overrideable

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d14e26b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d14e26b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d14e26b3

Branch: refs/heads/HDDS-4
Commit: d14e26b31fe46fb47a8e99a212c70016fd15a4d9
Parents: 0cf6e87
Author: Johan Gustavsson <jo...@treasure-data.com>
Authored: Mon May 28 17:29:59 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon May 28 17:29:59 2018 +0900

----------------------------------------------------------------------
 .../java/org/apache/hadoop/util/RunJar.java     | 17 ++++++---
 .../java/org/apache/hadoop/util/TestRunJar.java | 37 ++++++++++++++++++--
 .../org/apache/hadoop/streaming/StreamJob.java  |  4 ++-
 3 files changed, 51 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d14e26b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 9dd770c..f1b643c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -76,7 +76,11 @@ public class RunJar {
    */
   public static final String HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES =
       "HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES";
-
+  /**
+   * Environment key for disabling unjar in client code.
+   */
+  public static final String HADOOP_CLIENT_SKIP_UNJAR =
+      "HADOOP_CLIENT_SKIP_UNJAR";
   /**
    * Buffer size for copy the content of compressed file to new file.
    */
@@ -93,7 +97,7 @@ public class RunJar {
    * @throws IOException if an I/O error has occurred or toDir
    * cannot be created and does not already exist
    */
-  public static void unJar(File jarFile, File toDir) throws IOException {
+  public void unJar(File jarFile, File toDir) throws IOException {
     unJar(jarFile, toDir, MATCH_ANY);
   }
 
@@ -292,8 +296,9 @@ public class RunJar {
           }
         }, SHUTDOWN_HOOK_PRIORITY);
 
-
-    unJar(file, workDir);
+    if (!skipUnjar()) {
+      unJar(file, workDir);
+    }
 
     ClassLoader loader = createClassLoader(file, workDir);
 
@@ -364,6 +369,10 @@ public class RunJar {
     return Boolean.parseBoolean(System.getenv(HADOOP_USE_CLIENT_CLASSLOADER));
   }
 
+  boolean skipUnjar() {
+    return Boolean.parseBoolean(System.getenv(HADOOP_CLIENT_SKIP_UNJAR));
+  }
+
   String getHadoopClasspath() {
     return System.getenv(HADOOP_CLASSPATH);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d14e26b3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index 19485d6..ea07b97 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -17,10 +17,14 @@
  */
 package org.apache.hadoop.util;
 
+import static org.apache.hadoop.util.RunJar.MATCH_ANY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 import java.io.File;
@@ -99,7 +103,7 @@ public class TestRunJar {
 
     // Unjar everything
     RunJar.unJar(new File(TEST_ROOT_DIR, TEST_JAR_NAME),
-                 unjarDir);
+                 unjarDir, MATCH_ANY);
     assertTrue("foobar unpacked",
                new File(unjarDir, TestRunJar.FOOBAR_TXT).exists());
     assertTrue("foobaz unpacked",
@@ -177,7 +181,7 @@ public class TestRunJar {
 
     // Unjar everything
     RunJar.unJar(new File(TEST_ROOT_DIR, TEST_JAR_NAME),
-            unjarDir);
+            unjarDir, MATCH_ANY);
 
     String failureMessage = "Last modify time was lost during unJar";
     assertEquals(failureMessage, MOCKED_NOW, new File(unjarDir, TestRunJar.FOOBAR_TXT).lastModified());
@@ -221,5 +225,34 @@ public class TestRunJar {
     // run RunJar
     runJar.run(args);
     // it should not throw an exception
+    verify(runJar, times(1)).unJar(any(File.class), any(File.class));
+  }
+
+  @Test
+  public void testClientClassLoaderSkipUnjar() throws Throwable {
+    RunJar runJar = spy(new RunJar());
+    // enable the client classloader
+    when(runJar.useClientClassLoader()).thenReturn(true);
+    // set the system classes and blacklist the test main class and the test
+    // third class so they can be loaded by the application classloader
+    String mainCls = ClassLoaderCheckMain.class.getName();
+    String thirdCls = ClassLoaderCheckThird.class.getName();
+    String systemClasses = "-" + mainCls + "," +
+        "-" + thirdCls + "," +
+        ApplicationClassLoader.SYSTEM_CLASSES_DEFAULT;
+    when(runJar.getSystemClasses()).thenReturn(systemClasses);
+
+    // create the test jar
+    File testJar = JarFinder.makeClassLoaderTestJar(this.getClass(),
+        TEST_ROOT_DIR, TEST_JAR_2_NAME, BUFF_SIZE, mainCls, thirdCls);
+    // form the args
+    String[] args = new String[3];
+    args[0] = testJar.getAbsolutePath();
+    args[1] = mainCls;
+    when(runJar.skipUnjar()).thenReturn(true);
+    // run RunJar
+    runJar.run(args);
+    // it should not throw an exception
+    verify(runJar, times(0)).unJar(any(File.class), any(File.class));
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d14e26b3/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
index 9b09729..1fe8710 100644
--- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
+++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
@@ -72,6 +72,8 @@ import org.apache.hadoop.util.RunJar;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 
+import static org.apache.hadoop.util.RunJar.MATCH_ANY;
+
 /** All the client-side work happens here.
  * (Jar packaging, MapRed job submission and monitoring)
  */
@@ -1006,7 +1008,7 @@ public class StreamJob implements Tool {
     if (jar_ != null && isLocalHadoop()) {
       // getAbs became required when shell and subvm have different working dirs...
       File wd = new File(".").getAbsoluteFile();
-      RunJar.unJar(new File(jar_), wd);
+      RunJar.unJar(new File(jar_), wd, MATCH_ANY);
     }
 
     // if jobConf_ changes must recreate a JobClient
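
With unJar now an instance method, client code can override the extraction
step; a minimal sketch (the subclass is hypothetical, while the override
point and the environment switch come from the patch):

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.util.RunJar;

    // A RunJar variant that skips extraction entirely and relies on
    // resources being read straight from the jar. The same effect is
    // available without code by exporting HADOOP_CLIENT_SKIP_UNJAR=true
    // before launching the client.
    public class NoUnjarRunJar extends RunJar {
      @Override
      public void unJar(File jarFile, File toDir) throws IOException {
        // deliberately a no-op
      }
    }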


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[48/50] [abbrv] hadoop git commit: HDDS-7. Enable kerberos auth for Ozone client in hadoop rpc. Contributed by Ajay Kumar.

Posted by xy...@apache.org.
HDDS-7. Enable kerberos auth for Ozone client in hadoop rpc. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d686904
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d686904
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d686904

Branch: refs/heads/HDDS-4
Commit: 5d686904166270eb9a74d82ecac40dd4d77b97ed
Parents: 49cd77d
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri May 18 13:09:17 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu May 31 08:49:34 2018 -0700

----------------------------------------------------------------------
 .../src/test/compose/compose-secure/.env        | 17 ++++
 .../compose/compose-secure/docker-compose.yaml  | 66 ++++++++++++++
 .../test/compose/compose-secure/docker-config   | 66 ++++++++++++++
 .../acceptance/ozone-secure.robot               | 95 ++++++++++++++++++++
 .../hadoop/ozone/client/rest/RestClient.java    |  4 +-
 .../hadoop/ozone/client/rpc/RpcClient.java      |  6 +-
 6 files changed, 248 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d686904/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
new file mode 100644
index 0000000..3254735
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+OZONEDIR=../../../hadoop-dist/target/ozone
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d686904/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
new file mode 100644
index 0000000..2661163
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+   ozone.kdc:
+      image: ahadoop/kdc:v1
+   namenode:
+      image: ahadoop/ozone:v1
+      hostname: namenode
+      volumes:
+         - ${OZONEDIR}:/opt/hadoop
+      ports:
+         - 9000:9000
+      environment:
+          ENSURE_NAMENODE_DIR: /data/namenode
+      env_file:
+         - ./docker-config
+      command: ["/opt/hadoop/bin/hdfs","namenode"]
+   datanode:
+      image: ahadoop/ozone:v1
+      hostname: datanode
+      volumes:
+        - ${OZONEDIR}:/opt/hadoop
+      ports:
+        - 9874
+      env_file:
+        - ./docker-config
+      command: ["/opt/hadoop/bin/ozone","datanode"]
+   ksm:
+      image: ahadoop/ozone:v1
+      hostname: ksm
+      volumes:
+         - ${OZONEDIR}:/opt/hadoop
+      ports:
+         - 9874:9874
+      environment:
+         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+      env_file:
+          - ./docker-config
+      command: ["/opt/hadoop/bin/ozone","ksm"]
+   scm:
+      image: ahadoop/ozone:v1
+      hostname: scm
+      volumes:
+         - ${OZONEDIR}:/opt/hadoop
+      ports:
+         - 9876:9876
+      env_file:
+          - ./docker-config
+      environment:
+          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
+      command: ["/opt/hadoop/bin/ozone","scm"]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d686904/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
new file mode 100644
index 0000000..678c75a
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+OZONE-SITE.XML_ozone.ksm.address=ksm
+OZONE-SITE.XML_ozone.scm.names=scm
+OZONE-SITE.XML_ozone.enabled=True
+OZONE-SITE.XML_hdds.scm.datanode.id=/data/datanode.id
+OZONE-SITE.XML_hdds.scm.block.client.address=scm
+OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
+OZONE-SITE.XML_ozone.handler.type=distributed
+OZONE-SITE.XML_hdds.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
+OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM
+OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab
+OZONE-SITE.XML_ozone.ksm.kerberos.principal=ksm/ksm@EXAMPLE.COM
+OZONE-SITE.XML_ozone.ksm.kerberos.keytab.file=/etc/security/keytabs/ksm.keytab
+OZONE-SITE.XML_ozone.security.enabled=true
+OZONE-SITE.XML_hdds.scm.web.authentication.kerberos.principal=HTTP/scm@EXAMPLE.COM
+OZONE-SITE.XML_hdds.scm.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+OZONE-SITE.XML_ozone.ksm.web.authentication.kerberos.principal=HTTP/ksm@EXAMPLE.COM
+OZONE-SITE.XML_ozone.ksm.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+OZONE-SITE.XML_ozone.scm.block.client.address=scm
+OZONE-SITE.XML_ozone.scm.client.address=scm
+HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
+HDFS-SITE.XML_dfs.block.access.token.enable=true
+HDFS-SITE.XML_dfs.namenode.kerberos.principal=nn/namenode@EXAMPLE.COM
+HDFS-SITE.XML_dfs.namenode.keytab.file=/etc/security/keytabs/nn.keytab
+HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/datanode@EXAMPLE.COM
+HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab
+HDFS-SITE.XML_dfs.namenode.kerberos.internal.spnego.principal=HTTP/namenode@EXAMPLE.COM
+HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/_HOST@EXAMPLE.COM
+HDFS-SITE.XML_dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019
+HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012
+HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
+CORE-SITE.XML_dfs.data.transfer.protection=authentication
+CORE-SITE.XML_hadoop.security.authentication=kerberos
+CORE-SITE.XML_hadoop.security.auth_to_local=RULE:[2:$1@$0](.*)s/.*/root/
+LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
+LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
+
+OZONE_DATANODE_SECURE_USER=root
+CONF_DIR=/etc/security/keytabs
+KERBEROS_KEYTABS=dn nn ksm scm HTTP testuser
+KERBEROS_KEYSTORES=hadoop
+KERBEROS_SERVER=ozone.kdc
+JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/
+JSVC_HOME=/usr/bin
+SLEEP_SECONDS=10
+KERBEROS_ENABLED=true

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d686904/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
new file mode 100644
index 0000000..4a78980
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
@@ -0,0 +1,95 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Smoke test to start cluster with docker-compose environments.
+Library             OperatingSystem
+Suite Setup         Startup Ozone Cluster
+Suite Teardown      Teardown Ozone Cluster
+
+*** Variables ***
+${COMMON_RESTHEADER}   -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H  "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root"
+${version}
+
+*** Test Cases ***
+
+Daemons are running
+    Is daemon running           ksm
+    Is daemon running           scm
+    Is daemon running           datanode
+    Is daemon running           ozone.kdc
+
+Check if datanode is connected to the scm
+    Wait Until Keyword Succeeds     3min    5sec    Have healthy datanodes   1
+
+Test rest interface
+    ${result} =     Execute on      0   datanode        curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
+                    Should contain      ${result}       201 Created
+    ${result} =     Execute on      0   datanode        curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1"
+                    Should contain      ${result}       201 Created
+    ${result} =     Execute on      0   datanode        curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1"
+                    Should contain      ${result}       200 OK
+    ${result} =     Execute on      0   datanode        curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
+                    Should contain      ${result}       200 OK
+
+Test ozone cli
+    ${result} =     Execute on      1   datanode        ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root
+                    Should contain      ${result}       Client cannot authenticate via
+                    # Authenticate testuser
+                    Execute on      0   datanode        kinit -k testuser/datanode@EXAMPLE.COM -t /etc/security/keytabs/testuser.keytab
+                    Execute on      0   datanode        ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root
+    ${result} =     Execute on      0   datanode        ozone oz -listVolume o3://ksm/ -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
+                    Should contain      ${result}       createdOn
+                    Execute on      0   datanode        ozone oz -updateVolume o3://ksm/hive -user bill -quota 10TB
+    ${result} =     Execute on      0   datanode        ozone oz -infoVolume o3://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
+                    Should Be Equal     ${result}       bill
+
+*** Keywords ***
+
+Startup Ozone Cluster
+    ${rc}       ${output} =                 Run docker compose        0     down
+    ${rc}       ${output} =                 Run docker compose        0     up -d
+    Should Be Equal As Integers 	          ${rc} 	                  0
+    Wait Until Keyword Succeeds             3min    10sec    Is Daemon started   ksm     KSM is listening
+
+Teardown Ozone Cluster
+    Run docker compose      0           down
+
+Is daemon running
+    [arguments]             ${name}
+    ${result} =             Run                     docker ps
+    Should contain          ${result}               _${name}_1
+
+Is Daemon started
+    [arguments]     ${name}             ${expression}
+    ${rc}           ${result} =         Run docker compose      0         logs
+    Should contain  ${result}           ${expression}
+
+Have healthy datanodes
+    [arguments]         ${requirednodes}
+    ${result} =         Execute on     0     scm                 curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value'
+    Should Be Equal     ${result}           ${requirednodes}
+
+Execute on
+    [arguments]     ${expected_rc}      ${componentname}      ${command}
+    ${rc}           ${return} =         Run docker compose    ${expected_rc}     exec ${componentname} ${command}
+    [return]        ${return}
+
+Run docker compose
+    [arguments]                     ${expected_rc}              ${command}
+                                    Set Environment Variable    OZONEDIR                               ${basedir}/hadoop-dist/target/ozone
+    ${rc}                           ${output} =                 Run And Return Rc And Output           docker-compose -f ${basedir}/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml ${command}
+    Should Be Equal As Integers     ${rc}                       ${expected_rc}
+    [return]                        ${rc}                       ${output}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d686904/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
index 1169820..3f3b41d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
@@ -111,7 +111,7 @@ public class RestClient implements ClientProtocol {
     try {
       Preconditions.checkNotNull(conf);
       this.conf = conf;
-
+      this.ugi = UserGroupInformation.getCurrentUser();
       long socketTimeout = conf.getTimeDuration(
           OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
           OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
@@ -147,7 +147,7 @@ public class RestClient implements ClientProtocol {
                   .setConnectTimeout(Math.toIntExact(connectionTimeout))
                   .build())
           .build();
-      this.ugi = UserGroupInformation.getCurrentUser();
+
       this.userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS,
           KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d686904/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 43b94a1..f8e92b2 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -122,8 +122,7 @@ public class RpcClient implements ClientProtocol {
     this.keySpaceManagerClient =
         new KeySpaceManagerProtocolClientSideTranslatorPB(
             RPC.getProxy(KeySpaceManagerProtocolPB.class, ksmVersion,
-                ksmAddress, UserGroupInformation.getCurrentUser(), conf,
-                NetUtils.getDefaultSocketFactory(conf),
+                ksmAddress, ugi, conf, NetUtils.getDefaultSocketFactory(conf),
                 Client.getRpcTimeout(conf)));
 
     long scmVersion =
@@ -134,8 +133,7 @@ public class RpcClient implements ClientProtocol {
     this.storageContainerLocationClient =
         new StorageContainerLocationProtocolClientSideTranslatorPB(
             RPC.getProxy(StorageContainerLocationProtocolPB.class, scmVersion,
-                scmAddress, UserGroupInformation.getCurrentUser(), conf,
-                NetUtils.getDefaultSocketFactory(conf),
+                scmAddress, ugi, conf, NetUtils.getDefaultSocketFactory(conf),
                 Client.getRpcTimeout(conf)));
 
     this.xceiverClientManager = new XceiverClientManager(conf);
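
The client-side pattern in this change, reduced to a sketch: resolve the
caller's UGI once and pass the same instance to each proxy, so a Kerberos
login is applied consistently. The protocol class and address here are
placeholders:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.Client;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.security.UserGroupInformation;

    public class UgiProxyExample {
      // Build an RPC proxy bound to the current (possibly Kerberos-
      // authenticated) user rather than re-resolving the UGI per call site.
      static <T> T proxyFor(Class<T> protocol, long version,
          InetSocketAddress addr, Configuration conf) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        return RPC.getProxy(protocol, version, addr, ugi, conf,
            NetUtils.getDefaultSocketFactory(conf),
            Client.getRpcTimeout(conf));
      }
    }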


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[11/50] [abbrv] hadoop git commit: MAPREDUCE-7097. MapReduce JHS should honor yarn.webapp.filter-entity-list-by-user. Contributed by Sunil Govindan.

Posted by xy...@apache.org.
MAPREDUCE-7097. MapReduce JHS should honor yarn.webapp.filter-entity-list-by-user. Contributed by Sunil Govindan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88cbe57c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88cbe57c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88cbe57c

Branch: refs/heads/HDDS-4
Commit: 88cbe57c069a1d2dd3bfb32e3ad742566470a10b
Parents: d14e26b
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Mon May 28 12:45:07 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Mon May 28 14:05:49 2018 +0530

----------------------------------------------------------------------
 .../mapreduce/v2/hs/webapp/HsJobBlock.java      | 18 ++++++++++++++-
 .../mapreduce/v2/hs/webapp/TestHsJobBlock.java  | 20 ++++++++++++++--
 .../apache/hadoop/yarn/webapp/Controller.java   |  4 ++++
 .../org/apache/hadoop/yarn/webapp/View.java     | 24 +++++++++++++-------
 4 files changed, 55 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cbe57c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
index 18040f0..9b845cd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
@@ -27,6 +27,8 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 import java.util.Date;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
@@ -39,8 +41,10 @@ import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
@@ -56,9 +60,14 @@ import com.google.inject.Inject;
  */
 public class HsJobBlock extends HtmlBlock {
   final AppContext appContext;
+  private UserGroupInformation ugi;
+  private boolean isFilterAppListByUserEnabled;
 
-  @Inject HsJobBlock(AppContext appctx) {
+  @Inject HsJobBlock(Configuration conf, AppContext appctx, ViewContext ctx) {
+    super(ctx);
     appContext = appctx;
+    isFilterAppListByUserEnabled = conf
+        .getBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false);
   }
 
   /*
@@ -78,6 +87,13 @@ public class HsJobBlock extends HtmlBlock {
       html.p().__("Sorry, ", jid, " not found.").__();
       return;
     }
+    ugi = getCallerUGI();
+    if (isFilterAppListByUserEnabled && ugi != null
+        && !j.checkAccess(ugi, JobACL.VIEW_JOB)) {
+      html.p().__("Sorry, ", jid, " could not be viewed for '",
+          ugi.getUserName(), "'.").__();
+      return;
+    }
     if(j instanceof UnparsedJob) {
       final int taskCount = j.getTotalMaps() + j.getTotalReduces();
       UnparsedJob oversizedJob = (UnparsedJob) j;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cbe57c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
index 7fa238e..48e3d3b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
@@ -33,8 +33,10 @@ import org.apache.hadoop.mapreduce.v2.hs.UnparsedJob;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.util.StringHelper;
+import org.apache.hadoop.yarn.webapp.Controller;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.View.ViewContext;
 import org.apache.hadoop.yarn.webapp.view.BlockForTest;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlockForTest;
@@ -49,6 +51,8 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
 
+import javax.servlet.http.HttpServletRequest;
+
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -69,7 +73,13 @@ public class TestHsJobBlock {
         new JobHistoryStubWithAllOversizeJobs(maxAllowedTaskNum);
     jobHistory.init(config);
 
-    HsJobBlock jobBlock = new HsJobBlock(jobHistory) {
+    Controller.RequestContext rc = mock(Controller.RequestContext.class);
+    ViewContext view = mock(ViewContext.class);
+    HttpServletRequest req = mock(HttpServletRequest.class);
+    when(rc.getRequest()).thenReturn(req);
+    when(view.requestContext()).thenReturn(rc);
+
+    HsJobBlock jobBlock = new HsJobBlock(config, jobHistory, view) {
       // override this so that job block can fetch a job id.
       @Override
       public Map<String, String> moreParams() {
@@ -101,7 +111,13 @@ public class TestHsJobBlock {
     JobHistory jobHistory = new JobHitoryStubWithAllNormalSizeJobs();
     jobHistory.init(config);
 
-    HsJobBlock jobBlock = new HsJobBlock(jobHistory) {
+    Controller.RequestContext rc = mock(Controller.RequestContext.class);
+    ViewContext view = mock(ViewContext.class);
+    HttpServletRequest req = mock(HttpServletRequest.class);
+    when(rc.getRequest()).thenReturn(req);
+    when(view.requestContext()).thenReturn(rc);
+
+    HsJobBlock jobBlock = new HsJobBlock(config, jobHistory, view) {
       // override this so that the job block can fetch a job id.
       @Override
       public Map<String, String> moreParams() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cbe57c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java
index dc4eee2..1b25b84 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java
@@ -108,6 +108,10 @@ public abstract class Controller implements Params {
     }
 
     public String prefix() { return prefix; }
+
+    public HttpServletRequest getRequest() {
+      return request;
+    }
   }
 
   private RequestContext context;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cbe57c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/View.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/View.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/View.java
index c16787d..666a0bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/View.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/View.java
@@ -96,28 +96,36 @@ public abstract class View implements Params {
     return vc;
   }
 
-  public Throwable error() { return context().rc.error; }
+  public Throwable error() {
+    return context().requestContext().error;
+  }
 
-  public int status() { return context().rc.status; }
+  public int status() {
+    return context().requestContext().status;
+  }
 
-  public boolean inDevMode() { return context().rc.devMode; }
+  public boolean inDevMode() {
+    return context().requestContext().devMode;
+  }
 
-  public Injector injector() { return context().rc.injector; }
+  public Injector injector() {
+    return context().requestContext().injector;
+  }
 
   public <T> T getInstance(Class<T> cls) {
     return injector().getInstance(cls);
   }
 
   public HttpServletRequest request() {
-    return context().rc.request;
+    return context().requestContext().getRequest();
   }
 
   public HttpServletResponse response() {
-    return context().rc.response;
+    return context().requestContext().response;
   }
 
   public Map<String, String> moreParams() {
-    return context().rc.moreParams();
+    return context().requestContext().moreParams();
   }
 
   /**
@@ -125,7 +133,7 @@ public abstract class View implements Params {
    * @return the cookies map
    */
   public Map<String, Cookie> cookies() {
-    return context().rc.cookies();
+    return context().requestContext().cookies();
   }
 
   public ServletOutputStream outputStream() {
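
The access gate added to HsJobBlock, as an isolated sketch (the helper
class is invented; the configuration flag, the ACL, and the checkAccess
call are from the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.JobACL;
    import org.apache.hadoop.mapreduce.v2.app.job.Job;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public final class JobViewGate {
      // Only consult the job's VIEW_JOB ACL when filtering is enabled and
      // a caller identity is available; otherwise the job stays visible,
      // matching the pre-existing behaviour.
      static boolean canView(Configuration conf, Job job,
          UserGroupInformation callerUgi) {
        boolean filterByUser = conf.getBoolean(
            YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false);
        if (!filterByUser || callerUgi == null) {
          return true;
        }
        return job.checkAccess(callerUgi, JobACL.VIEW_JOB);
      }
    }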


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[27/50] [abbrv] hadoop git commit: HDDS-81. Moving ContainerReport inside Datanode heartbeat. Contributed by Nanda Kumar.

Posted by xy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index 2d88621..f5fe46a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.closer.ContainerCloser;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
@@ -33,7 +34,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.lease.Lease;
 import org.apache.hadoop.ozone.lease.LeaseException;
@@ -368,11 +369,12 @@ public class ContainerMapping implements Mapping {
    * @param reports Container report
    */
   @Override
-  public void processContainerReports(ContainerReportsRequestProto reports)
+  public void processContainerReports(DatanodeDetails datanodeDetails,
+                                      ContainerReportsProto reports)
       throws IOException {
     List<StorageContainerDatanodeProtocolProtos.ContainerInfo>
         containerInfos = reports.getReportsList();
-    containerSupervisor.handleContainerReport(reports);
+    containerSupervisor.handleContainerReport(datanodeDetails, reports);
     for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState :
         containerInfos) {
       byte[] dbKey = Longs.toByteArray(datanodeState.getContainerID());
@@ -402,7 +404,7 @@ public class ContainerMapping implements Mapping {
           // Container not found in our container db.
           LOG.error("Error while processing container report from datanode :" +
                   " {}, for container: {}, reason: container doesn't exist in" +
-                  "container database.", reports.getDatanodeDetails(),
+                  "container database.", datanodeDetails,
               datanodeState.getContainerID());
         }
       } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
index f560174..ee8e344 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
@@ -16,10 +16,11 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -98,7 +99,8 @@ public interface Mapping extends Closeable {
    *
    * @param reports Container report
    */
-  void processContainerReports(ContainerReportsRequestProto reports)
+  void processContainerReports(DatanodeDetails datanodeDetails,
+                               ContainerReportsProto reports)
       throws IOException;
 
 }

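For orientation, callers of the reworked Mapping interface now identify the
reporting datanode explicitly instead of pulling it out of the report message.
A minimal caller sketch, assuming hypothetical "mapping", "datanodeDetails"
and "containerInfoList" handles:

    ContainerReportsProto reports = ContainerReportsProto.newBuilder()
        .addAllReports(containerInfoList)
        .build();
    mapping.processContainerReports(datanodeDetails, reports);

The updated test call sites further below follow the same shape.
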
http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
index c14303f..5bd0574 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodePoolManager;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor;
@@ -295,24 +295,21 @@ public class ContainerSupervisor implements Closeable {
    * @param containerReport  -- Container report for a specific container from
    * a datanode.
    */
-  public void handleContainerReport(
-      ContainerReportsRequestProto containerReport) {
-    DatanodeDetails datanodeDetails = DatanodeDetails.getFromProtoBuf(
-        containerReport.getDatanodeDetails());
+  public void handleContainerReport(DatanodeDetails datanodeDetails,
+      ContainerReportsProto containerReport) {
     inProgressPoolListLock.readLock().lock();
     try {
       String poolName = poolManager.getNodePool(datanodeDetails);
       for (InProgressPool ppool : inProgressPoolList) {
         if (ppool.getPoolName().equalsIgnoreCase(poolName)) {
-          ppool.handleContainerReport(containerReport);
+          ppool.handleContainerReport(datanodeDetails, containerReport);
           return;
         }
       }
       // TODO: Decide if we can do anything else with this report.
       LOG.debug("Discarding the container report for pool {}. " +
               "That pool is not currently in the pool reconciliation process." +
-              " Container Name: {}", poolName,
-          containerReport.getDatanodeDetails());
+              " Container Name: {}", poolName, datanodeDetails);
     } catch (SCMException e) {
       LOG.warn("Skipping processing container report from datanode {}, "
               + "cause: failed to get the corresponding node pool",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
index c444e90..4b54731 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -178,21 +178,20 @@ public final class InProgressPool {
    *
    * @param containerReport - ContainerReport
    */
-  public void handleContainerReport(
-      ContainerReportsRequestProto containerReport) {
+  public void handleContainerReport(DatanodeDetails datanodeDetails,
+      ContainerReportsProto containerReport) {
     if (status == ProgressStatus.InProgress) {
-      executorService.submit(processContainerReport(containerReport));
+      executorService.submit(processContainerReport(datanodeDetails,
+          containerReport));
     } else {
       LOG.debug("Cannot handle container report when the pool is in {} status.",
           status);
     }
   }
 
-  private Runnable processContainerReport(
-      ContainerReportsRequestProto reports) {
+  private Runnable processContainerReport(DatanodeDetails datanodeDetails,
+      ContainerReportsProto reports) {
     return () -> {
-      DatanodeDetails datanodeDetails =
-          DatanodeDetails.getFromProtoBuf(reports.getDatanodeDetails());
       if (processedNodeSet.computeIfAbsent(datanodeDetails.getUuid(),
           (k) -> true)) {
         nodeProcessed.incrementAndGet();

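A pool only accepts reports while reconciliation is running; otherwise
handleContainerReport logs at debug level and drops the report. A hedged usage
sketch ("pool" is a hypothetical InProgressPool handle):

    // No-op unless the pool's status is ProgressStatus.InProgress; when it
    // is, the (datanode, report) pair is handed to the pool's executor and
    // processed asynchronously.
    pool.handleContainerReport(datanodeDetails, containerReportsProto);
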
http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java
index 05a9fc3..04658bd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hdds.scm.node;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 
 import static org.apache.hadoop.util.Time.monotonicNow;
 
@@ -31,7 +31,7 @@ import static org.apache.hadoop.util.Time.monotonicNow;
 public class HeartbeatQueueItem {
   private DatanodeDetails datanodeDetails;
   private long recvTimestamp;
-  private SCMNodeReport nodeReport;
+  private NodeReportProto nodeReport;
 
   /**
    *
@@ -40,7 +40,7 @@ public class HeartbeatQueueItem {
    * @param nodeReport - node report associated with the heartbeat if any.
    */
   HeartbeatQueueItem(DatanodeDetails datanodeDetails, long recvTimestamp,
-      SCMNodeReport nodeReport) {
+      NodeReportProto nodeReport) {
     this.datanodeDetails = datanodeDetails;
     this.recvTimestamp = recvTimestamp;
     this.nodeReport = nodeReport;
@@ -56,7 +56,7 @@ public class HeartbeatQueueItem {
   /**
    * @return node report.
    */
-  public SCMNodeReport getNodeReport() {
+  public NodeReportProto getNodeReport() {
     return nodeReport;
   }
 
@@ -72,7 +72,7 @@ public class HeartbeatQueueItem {
    */
   public static class Builder {
     private DatanodeDetails datanodeDetails;
-    private SCMNodeReport nodeReport;
+    private NodeReportProto nodeReport;
     private long recvTimestamp = monotonicNow();
 
     public Builder setDatanodeDetails(DatanodeDetails dnDetails) {
@@ -80,8 +80,8 @@ public class HeartbeatQueueItem {
       return this;
     }
 
-    public Builder setNodeReport(SCMNodeReport scmNodeReport) {
-      this.nodeReport = scmNodeReport;
+    public Builder setNodeReport(NodeReportProto report) {
+      this.nodeReport = report;
       return this;
     }
 

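Heartbeat items are assembled the same way as before, just against the renamed
proto type; the receive timestamp defaults to monotonicNow(). A small sketch
using the Builder above:

    HeartbeatQueueItem item = new HeartbeatQueueItem.Builder()
        .setDatanodeDetails(datanodeDetails)
        .setNodeReport(nodeReportProto)  // NodeReportProto; may be null
        .build();
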
http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 353a069..b339fb7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -28,15 +28,14 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto
+    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto
     .ErrorCode;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
 import org.apache.hadoop.ipc.Server;
@@ -592,7 +591,7 @@ public class SCMNodeManager
 
     DatanodeDetails datanodeDetails = hbItem.getDatanodeDetails();
     UUID datanodeUuid = datanodeDetails.getUuid();
-    SCMNodeReport nodeReport = hbItem.getNodeReport();
+    NodeReportProto nodeReport = hbItem.getNodeReport();
     long recvTimestamp = hbItem.getRecvTimestamp();
     long processTimestamp = Time.monotonicNow();
     if (LOG.isTraceEnabled()) {
@@ -637,7 +636,7 @@ public class SCMNodeManager
         new ReregisterCommand());
   }
 
-  private void updateNodeStat(UUID dnId, SCMNodeReport nodeReport) {
+  private void updateNodeStat(UUID dnId, NodeReportProto nodeReport) {
     SCMNodeStat stat = nodeStats.get(dnId);
     if (stat == null) {
       LOG.debug("SCM updateNodeStat based on heartbeat from previous" +
@@ -649,8 +648,9 @@ public class SCMNodeManager
       long totalCapacity = 0;
       long totalRemaining = 0;
       long totalScmUsed = 0;
-      List<SCMStorageReport> storageReports = nodeReport.getStorageReportList();
-      for (SCMStorageReport report : storageReports) {
+      List<StorageReportProto> storageReports = nodeReport
+          .getStorageReportList();
+      for (StorageReportProto report : storageReports) {
         totalCapacity += report.getCapacity();
         totalRemaining +=  report.getRemaining();
         totalScmUsed+= report.getScmUsed();
@@ -710,7 +710,7 @@ public class SCMNodeManager
    * Register the node if the node finds that it is not registered with any
    * SCM.
    *
-   * @param datanodeDetailsProto - Send datanodeDetails with Node info.
+   * @param datanodeDetails - DatanodeDetails with node info.
    *                   This function generates and assigns new datanode ID
    *                   for the datanode. This allows SCM to be run independent
    *                   of Namenode if required.
@@ -719,13 +719,11 @@ public class SCMNodeManager
    * @return SCMHeartbeatResponseProto
    */
   @Override
-  public SCMCommand register(DatanodeDetailsProto datanodeDetailsProto,
-                             SCMNodeReport nodeReport) {
+  public RegisteredCommand register(
+      DatanodeDetails datanodeDetails, NodeReportProto nodeReport) {
 
     String hostname = null;
     String ip = null;
-    DatanodeDetails datanodeDetails = DatanodeDetails.getFromProtoBuf(
-        datanodeDetailsProto);
     InetAddress dnAddress = Server.getRemoteIp();
     if (dnAddress != null) {
       // Mostly called inside an RPC, update ip and peer hostname
@@ -734,7 +732,7 @@ public class SCMNodeManager
       datanodeDetails.setHostName(hostname);
       datanodeDetails.setIpAddress(ip);
     }
-    SCMCommand responseCommand = verifyDatanodeUUID(datanodeDetails);
+    RegisteredCommand responseCommand = verifyDatanodeUUID(datanodeDetails);
     if (responseCommand != null) {
       return responseCommand;
     }
@@ -785,7 +783,8 @@ public class SCMNodeManager
    * @param datanodeDetails - Datanode Details.
    * @return SCMCommand
    */
-  private SCMCommand verifyDatanodeUUID(DatanodeDetails datanodeDetails) {
+  private RegisteredCommand verifyDatanodeUUID(
+      DatanodeDetails datanodeDetails) {
     if (datanodeDetails.getUuid() != null &&
         nodes.containsKey(datanodeDetails.getUuid())) {
       LOG.trace("Datanode is already registered. Datanode: {}",
@@ -802,34 +801,23 @@ public class SCMNodeManager
   /**
    * Send heartbeat to indicate the datanode is alive and doing well.
    *
-   * @param datanodeDetailsProto - DatanodeDetailsProto.
+   * @param datanodeDetails - DatanodeDetails.
    * @param nodeReport - node report.
    * @return SCMheartbeat response.
    * @throws IOException
    */
   @Override
   public List<SCMCommand> sendHeartbeat(
-      DatanodeDetailsProto datanodeDetailsProto, SCMNodeReport nodeReport) {
+      DatanodeDetails datanodeDetails, NodeReportProto nodeReport) {
 
-    Preconditions.checkNotNull(datanodeDetailsProto, "Heartbeat is missing " +
+    Preconditions.checkNotNull(datanodeDetails, "Heartbeat is missing " +
         "DatanodeDetails.");
-    DatanodeDetails datanodeDetails = DatanodeDetails
-        .getFromProtoBuf(datanodeDetailsProto);
-    // Checking for NULL to make sure that we don't get
-    // an exception from ConcurrentList.
-    // This could be a problem in tests, if this function is invoked via
-    // protobuf, transport layer will guarantee that this is not null.
-    if (datanodeDetails != null) {
-      heartbeatQueue.add(
-          new HeartbeatQueueItem.Builder()
-              .setDatanodeDetails(datanodeDetails)
-              .setNodeReport(nodeReport)
-              .build());
-      return commandQueue.getCommand(datanodeDetails.getUuid());
-    } else {
-      LOG.error("Datanode ID in heartbeat is null");
-    }
-    return null;
+    heartbeatQueue.add(
+        new HeartbeatQueueItem.Builder()
+            .setDatanodeDetails(datanodeDetails)
+            .setNodeReport(nodeReport)
+            .build());
+    return commandQueue.getCommand(datanodeDetails.getUuid());
   }
 
   /**

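With the protobuf-to-domain conversion moved up into the RPC server, both
node-manager entry points now take the domain types directly. A condensed
sketch of the new call shapes (caller-side, not committed code):

    RegisteredCommand rc =
        nodeManager.register(datanodeDetails, nodeReportProto);
    List<SCMCommand> commands =
        nodeManager.sendHeartbeat(datanodeDetails, nodeReportProto);
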
http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
index fa423bb..6ea83df 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -23,7 +23,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+    StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -33,7 +33,11 @@ import org.slf4j.LoggerFactory;
 
 import javax.management.ObjectName;
 import java.io.IOException;
-import java.util.*;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Collectors;
 
@@ -159,7 +163,7 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
   }
 
   public StorageReportResult processNodeReport(UUID datanodeID,
-      StorageContainerDatanodeProtocolProtos.SCMNodeReport nodeReport)
+      StorageContainerDatanodeProtocolProtos.NodeReportProto nodeReport)
       throws IOException {
     Preconditions.checkNotNull(datanodeID);
     Preconditions.checkNotNull(nodeReport);
@@ -170,9 +174,9 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
     Set<StorageLocationReport> storagReportSet = new HashSet<>();
     Set<StorageLocationReport> fullVolumeSet = new HashSet<>();
     Set<StorageLocationReport> failedVolumeSet = new HashSet<>();
-    List<SCMStorageReport>
+    List<StorageReportProto>
         storageReports = nodeReport.getStorageReportList();
-    for (SCMStorageReport report : storageReports) {
+    for (StorageReportProto report : storageReports) {
       StorageLocationReport storageReport =
           StorageLocationReport.getFromProtobuf(report);
       storagReportSet.add(storageReport);

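The stat map consumes the same renamed protos. Assembling an input for
processNodeReport might look like this (a sketch; "map", "datanodeID", "id"
and "path" are hypothetical, and the sizes are arbitrary):

    StorageReportProto storageReport = StorageReportProto.newBuilder()
        .setStorageUuid(id)
        .setStorageLocation(path)
        .setCapacity(100L)
        .setScmUsed(10L)
        .setRemaining(90L)
        .build();
    NodeReportProto nodeReport = NodeReportProto.newBuilder()
        .addStorageReport(storageReport)
        .build();
    StorageReportResult result = map.processNodeReport(datanodeID, nodeReport);
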
http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 6e5b7de..1b1645d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -25,29 +25,47 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.BlockingService;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMReregisterCmdResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
+    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ReregisterCommandProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos
+    .ContainerBlocksDeletionACKResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos
+    .ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
+
 
+import static org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto
+    .Type.closeContainerCommand;
+import static org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto
+    .Type.deleteBlocksCommand;
+import static org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto
+    .Type.reregisterCommand;
 
-import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.versionCommand;
-import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.registeredCommand;
-import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.reregisterCommand;
-import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.deleteBlocksCommand;
-import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.closeContainerCommand;
 
 
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
@@ -150,96 +168,81 @@ public class SCMDatanodeProtocolServer implements
 
   @Override
   public SCMHeartbeatResponseProto sendHeartbeat(
-      HddsProtos.DatanodeDetailsProto datanodeDetails,
-      StorageContainerDatanodeProtocolProtos.SCMNodeReport nodeReport)
+      SCMHeartbeatRequestProto heartbeat)
       throws IOException {
+    // TODO: Add a heartbeat dispatcher.
+    DatanodeDetails datanodeDetails = DatanodeDetails
+        .getFromProtoBuf(heartbeat.getDatanodeDetails());
+    NodeReportProto nodeReport = heartbeat.getNodeReport();
     List<SCMCommand> commands =
         scm.getScmNodeManager().sendHeartbeat(datanodeDetails, nodeReport);
-    List<SCMCommandResponseProto> cmdResponses = new LinkedList<>();
+    List<SCMCommandProto> cmdResponses = new LinkedList<>();
     for (SCMCommand cmd : commands) {
-      cmdResponses.add(getCommandResponse(cmd, datanodeDetails.getUuid()));
+      cmdResponses.add(getCommandResponse(cmd));
     }
     return SCMHeartbeatResponseProto.newBuilder()
+        .setDatanodeUUID(datanodeDetails.getUuidString())
         .addAllCommands(cmdResponses).build();
   }
 
   @Override
-  public SCMRegisteredCmdResponseProto register(
-      HddsProtos.DatanodeDetailsProto datanodeDetails, SCMNodeReport nodeReport,
-      ContainerReportsRequestProto containerReportsRequestProto)
+  public SCMRegisteredResponseProto register(
+      HddsProtos.DatanodeDetailsProto datanodeDetailsProto,
+      NodeReportProto nodeReport,
+      ContainerReportsProto containerReportsProto)
       throws IOException {
+    DatanodeDetails datanodeDetails = DatanodeDetails
+        .getFromProtoBuf(datanodeDetailsProto);
     // TODO : Return the list of Nodes that forms the SCM HA.
-    RegisteredCommand registeredCommand = (RegisteredCommand) scm
-        .getScmNodeManager().register(datanodeDetails, nodeReport);
-    SCMCmdType type = registeredCommand.getType();
-    if (type == SCMCmdType.registeredCommand && registeredCommand.getError()
-        == SCMRegisteredCmdResponseProto.ErrorCode.success) {
-      scm.getScmContainerManager().processContainerReports(
-          containerReportsRequestProto);
+    RegisteredCommand registeredCommand = scm.getScmNodeManager()
+        .register(datanodeDetails, nodeReport);
+    if (registeredCommand.getError()
+        == SCMRegisteredResponseProto.ErrorCode.success) {
+      scm.getScmContainerManager().processContainerReports(datanodeDetails,
+          containerReportsProto);
     }
     return getRegisteredResponse(registeredCommand);
   }
 
   @VisibleForTesting
-  public static SCMRegisteredCmdResponseProto getRegisteredResponse(
-        SCMCommand cmd) {
-    Preconditions.checkState(cmd.getClass() == RegisteredCommand.class);
-    RegisteredCommand rCmd = (RegisteredCommand) cmd;
-    SCMCmdType type = cmd.getType();
-    if (type != SCMCmdType.registeredCommand) {
-      throw new IllegalArgumentException(
-          "Registered command is not well " + "formed. Internal Error.");
-    }
-    return SCMRegisteredCmdResponseProto.newBuilder()
+  public static SCMRegisteredResponseProto getRegisteredResponse(
+      RegisteredCommand cmd) {
+    return SCMRegisteredResponseProto.newBuilder()
         // TODO : Fix this later when we have multiple SCM support.
         // .setAddressList(addressList)
-        .setErrorCode(rCmd.getError())
-        .setClusterID(rCmd.getClusterID())
-        .setDatanodeUUID(rCmd.getDatanodeUUID())
+        .setErrorCode(cmd.getError())
+        .setClusterID(cmd.getClusterID())
+        .setDatanodeUUID(cmd.getDatanodeUUID())
         .build();
   }
 
-  @Override
-  public ContainerReportsResponseProto sendContainerReport(
-      ContainerReportsRequestProto reports)
+  public void processContainerReports(DatanodeDetails datanodeDetails,
+                                      ContainerReportsProto reports)
       throws IOException {
-    updateContainerReportMetrics(reports);
-
+    updateContainerReportMetrics(datanodeDetails, reports);
     // should we process container reports async?
-    scm.getScmContainerManager().processContainerReports(reports);
-    return ContainerReportsResponseProto.newBuilder().build();
+    scm.getScmContainerManager()
+        .processContainerReports(datanodeDetails, reports);
   }
 
-  private void updateContainerReportMetrics(
-      ContainerReportsRequestProto reports) {
-    ContainerStat newStat = null;
-    // TODO: We should update the logic once incremental container report
-    // type is supported.
-    if (reports
-        .getType() == StorageContainerDatanodeProtocolProtos
-        .ContainerReportsRequestProto.reportType.fullReport) {
-      newStat = new ContainerStat();
-      for (StorageContainerDatanodeProtocolProtos.ContainerInfo info : reports
-          .getReportsList()) {
-        newStat.add(new ContainerStat(info.getSize(), info.getUsed(),
-            info.getKeyCount(), info.getReadBytes(), info.getWriteBytes(),
-            info.getReadCount(), info.getWriteCount()));
-      }
-
-      // update container metrics
-      StorageContainerManager.getMetrics().setLastContainerStat(newStat);
+  private void updateContainerReportMetrics(DatanodeDetails datanodeDetails,
+                                            ContainerReportsProto reports) {
+    ContainerStat newStat = new ContainerStat();
+    for (StorageContainerDatanodeProtocolProtos.ContainerInfo info : reports
+        .getReportsList()) {
+      newStat.add(new ContainerStat(info.getSize(), info.getUsed(),
+          info.getKeyCount(), info.getReadBytes(), info.getWriteBytes(),
+          info.getReadCount(), info.getWriteCount()));
     }
+    // update container metrics
+    StorageContainerManager.getMetrics().setLastContainerStat(newStat);
 
     // Update container stat entry, this will trigger a removal operation if it
     // exists in cache.
-    synchronized (scm.getContainerReportCache()) {
-      String datanodeUuid = reports.getDatanodeDetails().getUuid();
-      if (datanodeUuid != null && newStat != null) {
-        scm.getContainerReportCache().put(datanodeUuid, newStat);
-        // update global view container metrics
-        StorageContainerManager.getMetrics().incrContainerStat(newStat);
-      }
-    }
+    String datanodeUuid = datanodeDetails.getUuidString();
+    scm.getContainerReportCache().put(datanodeUuid, newStat);
+    // update global view container metrics
+    StorageContainerManager.getMetrics().incrContainerStat(newStat);
   }
 
 
@@ -298,28 +301,15 @@ public class SCMDatanodeProtocolServer implements
    * @throws IOException
    */
   @VisibleForTesting
-  public StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto
-      getCommandResponse(
-      SCMCommand cmd, final String datanodeID) throws IOException {
-    SCMCmdType type = cmd.getType();
-    SCMCommandResponseProto.Builder builder =
-        SCMCommandResponseProto.newBuilder().setDatanodeUUID(datanodeID);
-    switch (type) {
-    case registeredCommand:
-      return builder
-          .setCmdType(registeredCommand)
-          .setRegisteredProto(SCMRegisteredCmdResponseProto
-              .getDefaultInstance())
-          .build();
-    case versionCommand:
-      return builder
-          .setCmdType(versionCommand)
-          .setVersionProto(SCMVersionResponseProto.getDefaultInstance())
-          .build();
+  public SCMCommandProto getCommandResponse(SCMCommand cmd)
+      throws IOException {
+    SCMCommandProto.Builder builder =
+        SCMCommandProto.newBuilder();
+    switch (cmd.getType()) {
     case reregisterCommand:
       return builder
-          .setCmdType(reregisterCommand)
-          .setReregisterProto(SCMReregisterCmdResponseProto
+          .setCommandType(reregisterCommand)
+          .setReregisterCommandProto(ReregisterCommandProto
               .getDefaultInstance())
           .build();
     case deleteBlocksCommand:
@@ -335,13 +325,14 @@ public class SCMDatanodeProtocolServer implements
               .collect(Collectors.toList());
       scm.getScmBlockManager().getDeletedBlockLog().incrementCount(txs);
       return builder
-          .setCmdType(deleteBlocksCommand)
-          .setDeleteBlocksProto(((DeleteBlocksCommand) cmd).getProto())
+          .setCommandType(deleteBlocksCommand)
+          .setDeleteBlocksCommandProto(((DeleteBlocksCommand) cmd).getProto())
           .build();
     case closeContainerCommand:
       return builder
-          .setCmdType(closeContainerCommand)
-          .setCloseContainerProto(((CloseContainerCommand) cmd).getProto())
+          .setCommandType(closeContainerCommand)
+          .setCloseContainerCommandProto(
+              ((CloseContainerCommand) cmd).getProto())
           .build();
     default:
       throw new IllegalArgumentException("Not implemented");

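With SCMCmdType gone, every command travels as an SCMCommandProto whose Type
field discriminates the payload. A hedged sketch of how a recipient might
branch on a heartbeat response (getCommandsList/getCommandType follow standard
protobuf naming for the fields built above):

    for (SCMCommandProto cmd : heartbeatResponse.getCommandsList()) {
      switch (cmd.getCommandType()) {
      case reregisterCommand:
        // re-register with SCM before doing anything else
        break;
      case deleteBlocksCommand:
      case closeContainerCommand:
        // dispatch to the matching command handler
        break;
      default:
        break;
      }
    }
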
http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index 5cf0a92..b8036d7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -18,9 +18,9 @@ package org.apache.hadoop.hdds.scm;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol
-    .proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+        .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
@@ -53,16 +53,17 @@ public final class TestUtils {
   public static DatanodeDetails getDatanodeDetails(SCMNodeManager nodeManager,
       String uuid) {
     DatanodeDetails datanodeDetails = getDatanodeDetails(uuid);
-    nodeManager.register(datanodeDetails.getProtoBufMessage(), null);
+    nodeManager.register(datanodeDetails, null);
     return datanodeDetails;
   }
 
   /**
    * Create Node Report object.
-   * @return SCMNodeReport
+   * @return NodeReportProto
    */
-  public static SCMNodeReport createNodeReport(List<SCMStorageReport> reports) {
-    SCMNodeReport.Builder nodeReport = SCMNodeReport.newBuilder();
+  public static NodeReportProto createNodeReport(
+      List<StorageReportProto> reports) {
+    NodeReportProto.Builder nodeReport = NodeReportProto.newBuilder();
     nodeReport.addAllStorageReport(reports);
     return nodeReport.build();
   }
@@ -71,14 +72,14 @@ public final class TestUtils {
    * Create SCM Storage Report object.
    * @return list of SCMStorageReport
    */
-  public static List<SCMStorageReport> createStorageReport(long capacity,
+  public static List<StorageReportProto> createStorageReport(long capacity,
       long used, long remaining, String path, StorageTypeProto type, String id,
       int count) {
-    List<SCMStorageReport> reportList = new ArrayList<>();
+    List<StorageReportProto> reportList = new ArrayList<>();
     for (int i = 0; i < count; i++) {
       Preconditions.checkNotNull(path);
       Preconditions.checkNotNull(id);
-      SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
+      StorageReportProto.Builder srb = StorageReportProto.newBuilder();
       srb.setStorageUuid(id).setStorageLocation(path).setCapacity(capacity)
           .setScmUsed(used).setRemaining(remaining);
       StorageTypeProto storageTypeProto =

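These helpers are what the updated tests lean on; typical usage (the
capacity/used/remaining figures are whatever the test needs):

    List<StorageReportProto> reports = TestUtils
        .createStorageReport(capacity, used, remaining, path, null, id, 1);
    NodeReportProto nodeReport = TestUtils.createNodeReport(reports);
    nodeManager.sendHeartbeat(datanodeDetails, nodeReport);
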
http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index a46d7ba..8c59462 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -24,13 +24,14 @@ import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.VersionResponse;
+import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.assertj.core.util.Preconditions;
 import org.mockito.Mockito;
@@ -370,13 +371,13 @@ public class MockNodeManager implements NodeManager {
    * Register the node if the node finds that it is not registered with any
    * SCM.
    *
-   * @param datanodeDetails DatanodeDetailsProto
-   * @param nodeReport SCMNodeReport
+   * @param datanodeDetails DatanodeDetails
+   * @param nodeReport NodeReportProto
    * @return SCMHeartbeatResponseProto
    */
   @Override
-  public SCMCommand register(HddsProtos.DatanodeDetailsProto datanodeDetails,
-                             SCMNodeReport nodeReport) {
+  public RegisteredCommand register(DatanodeDetails datanodeDetails,
+      NodeReportProto nodeReport) {
     return null;
   }
 
@@ -388,9 +389,8 @@ public class MockNodeManager implements NodeManager {
    * @return SCMheartbeat response list
    */
   @Override
-  public List<SCMCommand> sendHeartbeat(
-      HddsProtos.DatanodeDetailsProto datanodeDetails,
-      SCMNodeReport nodeReport) {
+  public List<SCMCommand> sendHeartbeat(DatanodeDetails datanodeDetails,
+      NodeReportProto nodeReport) {
     if ((datanodeDetails != null) && (nodeReport != null) && (nodeReport
         .getStorageReportCount() > 0)) {
       SCMNodeStat stat = this.nodeMetricMap.get(datanodeDetails.getUuid());
@@ -398,8 +398,9 @@ public class MockNodeManager implements NodeManager {
       long totalCapacity = 0L;
       long totalRemaining = 0L;
       long totalScmUsed = 0L;
-      List<SCMStorageReport> storageReports = nodeReport.getStorageReportList();
-      for (SCMStorageReport report : storageReports) {
+      List<StorageReportProto> storageReports = nodeReport
+          .getStorageReportList();
+      for (StorageReportProto report : storageReports) {
         totalCapacity += report.getCapacity();
         totalRemaining += report.getRemaining();
         totalScmUsed += report.getScmUsed();
@@ -407,8 +408,7 @@ public class MockNodeManager implements NodeManager {
       aggregateStat.subtract(stat);
       stat.set(totalCapacity, totalScmUsed, totalRemaining);
       aggregateStat.add(stat);
-      nodeMetricMap.put(DatanodeDetails
-          .getFromProtoBuf(datanodeDetails).getUuid(), stat);
+      nodeMetricMap.put(datanodeDetails.getUuid(), stat);
 
     }
     return null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
index f318316..ba2ab64 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -191,8 +191,6 @@ public class TestContainerMapping {
   public void testFullContainerReport() throws IOException {
     ContainerInfo info = createContainer();
     DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
-    ContainerReportsRequestProto.reportType reportType =
-        ContainerReportsRequestProto.reportType.fullReport;
     List<StorageContainerDatanodeProtocolProtos.ContainerInfo> reports =
         new ArrayList<>();
     StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
@@ -209,12 +207,11 @@ public class TestContainerMapping {
 
     reports.add(ciBuilder.build());
 
-    ContainerReportsRequestProto.Builder crBuilder =
-        ContainerReportsRequestProto.newBuilder();
-    crBuilder.setDatanodeDetails(datanodeDetails.getProtoBufMessage())
-        .setType(reportType).addAllReports(reports);
+    ContainerReportsProto.Builder crBuilder = ContainerReportsProto
+        .newBuilder();
+    crBuilder.addAllReports(reports);
 
-    mapping.processContainerReports(crBuilder.build());
+    mapping.processContainerReports(datanodeDetails, crBuilder.build());
 
     ContainerInfo updatedContainer =
         mapping.getContainer(info.getContainerID());
@@ -227,8 +224,6 @@ public class TestContainerMapping {
   public void testContainerCloseWithContainerReport() throws IOException {
     ContainerInfo info = createContainer();
     DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
-    ContainerReportsRequestProto.reportType reportType =
-        ContainerReportsRequestProto.reportType.fullReport;
     List<StorageContainerDatanodeProtocolProtos.ContainerInfo> reports =
         new ArrayList<>();
 
@@ -246,12 +241,11 @@ public class TestContainerMapping {
 
     reports.add(ciBuilder.build());
 
-    ContainerReportsRequestProto.Builder crBuilder =
-        ContainerReportsRequestProto.newBuilder();
-    crBuilder.setDatanodeDetails(datanodeDetails.getProtoBufMessage())
-        .setType(reportType).addAllReports(reports);
+    ContainerReportsProto.Builder crBuilder =
+        ContainerReportsProto.newBuilder();
+    crBuilder.addAllReports(reports);
 
-    mapping.processContainerReports(crBuilder.build());
+    mapping.processContainerReports(datanodeDetails, crBuilder.build());
 
     ContainerInfo updatedContainer =
         mapping.getContainer(info.getContainerID());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
index 15ecbad..0a3efda 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -199,9 +199,8 @@ public class TestContainerCloser {
 
   private void sendContainerReport(ContainerInfo info, long used) throws
       IOException {
-    ContainerReportsRequestProto.Builder
-        reports =  ContainerReportsRequestProto.newBuilder();
-    reports.setType(ContainerReportsRequestProto.reportType.fullReport);
+    ContainerReportsProto.Builder
+        reports =  ContainerReportsProto.newBuilder();
 
     StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
         StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
@@ -214,9 +213,8 @@ public class TestContainerCloser {
         .setWriteCount(100000000L)
         .setReadBytes(2000000000L)
         .setWriteBytes(2000000000L);
-    reports.setDatanodeDetails(
-        TestUtils.getDatanodeDetails().getProtoBufMessage());
     reports.addReports(ciBuilder);
-    mapping.processContainerReports(reports.build());
+    mapping.processContainerReports(TestUtils.getDatanodeDetails(),
+        reports.build());
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 09b6cd1..5ad28f6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hdds.scm.container.placement.algorithms
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -133,9 +133,9 @@ public class TestContainerPlacement {
       for (DatanodeDetails datanodeDetails : datanodes) {
         String id = UUID.randomUUID().toString();
         String path = testDir.getAbsolutePath() + "/" + id;
-        List<SCMStorageReport> reports = TestUtils
+        List<StorageReportProto> reports = TestUtils
             .createStorageReport(capacity, used, remaining, path, null, id, 1);
-        nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+        nodeManager.sendHeartbeat(datanodeDetails,
             TestUtils.createNodeReport(reports));
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index de87e50..2b04d6b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.hdds.scm.node;
 
 import com.google.common.base.Supplier;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
@@ -26,7 +28,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -63,8 +65,6 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
     .HEALTHY;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
-import static org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCmdType;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.core.StringStartsWith.startsWith;
 import static org.junit.Assert.assertEquals;
@@ -144,7 +144,7 @@ public class TestNodeManager {
       for (int x = 0; x < nodeManager.getMinimumChillModeNodes(); x++) {
         DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
             nodeManager);
-        nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+        nodeManager.sendHeartbeat(datanodeDetails,
             null);
       }
 
@@ -191,8 +191,8 @@ public class TestNodeManager {
 
       // Need 100 nodes to come out of chill mode, only one node is sending HB.
       nodeManager.setMinimumChillModeNodes(100);
-      nodeManager.sendHeartbeat(TestUtils.getDatanodeDetails(nodeManager)
-          .getProtoBufMessage(), null);
+      nodeManager.sendHeartbeat(TestUtils.getDatanodeDetails(nodeManager),
+          null);
       GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
           100, 4 * 1000);
       assertFalse("Not enough heartbeat, Node manager should have" +
@@ -219,7 +219,7 @@ public class TestNodeManager {
 
       // Send 10 heartbeat from same node, and assert we never leave chill mode.
       for (int x = 0; x < 10; x++) {
-        nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+        nodeManager.sendHeartbeat(datanodeDetails,
             null);
       }
 
@@ -250,7 +250,7 @@ public class TestNodeManager {
     nodeManager.close();
 
     // These should never be processed.
-    nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+    nodeManager.sendHeartbeat(datanodeDetails,
         null);
 
     // Let us just wait for 2 seconds to prove that HBs are not processed.
@@ -274,13 +274,13 @@ public class TestNodeManager {
     DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
     String dnId = datanodeDetails.getUuidString();
     String storagePath = testDir.getAbsolutePath() + "/" + dnId;
-    List<SCMStorageReport> reports =
+    List<StorageReportProto> reports =
         TestUtils.createStorageReport(100, 10, 90, storagePath, null, dnId, 1);
     try (SCMNodeManager nodemanager = createNodeManager(conf)) {
-      nodemanager.register(datanodeDetails.getProtoBufMessage(),
+      nodemanager.register(datanodeDetails,
           TestUtils.createNodeReport(reports));
       List<SCMCommand> command = nodemanager.sendHeartbeat(
-          datanodeDetails.getProtoBufMessage(), null);
+          datanodeDetails, null);
       Assert.assertTrue(nodemanager.getAllNodes().contains(datanodeDetails));
       Assert.assertTrue("On regular HB calls, SCM responses a "
           + "datanode with an empty command list", command.isEmpty());
@@ -298,10 +298,10 @@ public class TestNodeManager {
         GenericTestUtils.waitFor(new Supplier<Boolean>() {
           @Override public Boolean get() {
             List<SCMCommand> command =
-                nodemanager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+                nodemanager.sendHeartbeat(datanodeDetails,
                     null);
             return command.size() == 1 && command.get(0).getType()
-                .equals(SCMCmdType.reregisterCommand);
+                .equals(SCMCommandProto.Type.reregisterCommand);
           }
         }, 100, 3 * 1000);
       } catch (TimeoutException e) {
@@ -330,7 +330,7 @@ public class TestNodeManager {
       for (int x = 0; x < count; x++) {
         DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
             nodeManager);
-        nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+        nodeManager.sendHeartbeat(datanodeDetails,
             null);
       }
       GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
@@ -422,19 +422,19 @@ public class TestNodeManager {
       DatanodeDetails staleNode = TestUtils.getDatanodeDetails(nodeManager);
 
       // Heartbeat once
-      nodeManager.sendHeartbeat(staleNode.getProtoBufMessage(),
+      nodeManager.sendHeartbeat(staleNode,
           null);
 
       // Heartbeat all other nodes.
       for (DatanodeDetails dn : nodeList) {
-        nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null);
+        nodeManager.sendHeartbeat(dn, null);
       }
 
       // Wait for 2 seconds .. and heartbeat good nodes again.
       Thread.sleep(2 * 1000);
 
       for (DatanodeDetails dn : nodeList) {
-        nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null);
+        nodeManager.sendHeartbeat(dn, null);
       }
 
       // Wait for 2 seconds, wait a total of 4 seconds to make sure that the
@@ -451,7 +451,7 @@ public class TestNodeManager {
 
       // heartbeat good nodes again.
       for (DatanodeDetails dn : nodeList) {
-        nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null);
+        nodeManager.sendHeartbeat(dn, null);
       }
 
       //  6 seconds is the dead window for this test , so we wait a total of
@@ -565,11 +565,11 @@ public class TestNodeManager {
       DatanodeDetails deadNode =
           TestUtils.getDatanodeDetails(nodeManager);
       nodeManager.sendHeartbeat(
-          healthyNode.getProtoBufMessage(), null);
+          healthyNode, null);
       nodeManager.sendHeartbeat(
-          staleNode.getProtoBufMessage(), null);
+          staleNode, null);
       nodeManager.sendHeartbeat(
-          deadNode.getProtoBufMessage(), null);
+          deadNode, null);
 
       // Sleep so that heartbeat processing thread gets to run.
       Thread.sleep(500);
@@ -596,15 +596,15 @@ public class TestNodeManager {
        */
 
       nodeManager.sendHeartbeat(
-          healthyNode.getProtoBufMessage(), null);
+          healthyNode, null);
       nodeManager.sendHeartbeat(
-          staleNode.getProtoBufMessage(), null);
+          staleNode, null);
       nodeManager.sendHeartbeat(
-          deadNode.getProtoBufMessage(), null);
+          deadNode, null);
 
       Thread.sleep(1500);
       nodeManager.sendHeartbeat(
-          healthyNode.getProtoBufMessage(), null);
+          healthyNode, null);
       Thread.sleep(2 * 1000);
       assertEquals(1, nodeManager.getNodeCount(HEALTHY));
 
@@ -625,12 +625,12 @@ public class TestNodeManager {
        */
 
       nodeManager.sendHeartbeat(
-          healthyNode.getProtoBufMessage(), null);
+          healthyNode, null);
       nodeManager.sendHeartbeat(
-          staleNode.getProtoBufMessage(), null);
+          staleNode, null);
       Thread.sleep(1500);
       nodeManager.sendHeartbeat(
-          healthyNode.getProtoBufMessage(), null);
+          healthyNode, null);
       Thread.sleep(2 * 1000);
 
       // 3.5 seconds have elapsed for stale node, so it moves into Stale.
@@ -664,11 +664,11 @@ public class TestNodeManager {
        * back all the nodes in healthy state.
        */
       nodeManager.sendHeartbeat(
-          healthyNode.getProtoBufMessage(), null);
+          healthyNode, null);
       nodeManager.sendHeartbeat(
-          staleNode.getProtoBufMessage(), null);
+          staleNode, null);
       nodeManager.sendHeartbeat(
-          deadNode.getProtoBufMessage(), null);
+          deadNode, null);
       Thread.sleep(500);
       //Assert all nodes are healthy.
       assertEquals(3, nodeManager.getAllNodes().size());
@@ -689,7 +689,7 @@ public class TestNodeManager {
                                 int sleepDuration) throws InterruptedException {
     while (!Thread.currentThread().isInterrupted()) {
       for (DatanodeDetails dn : list) {
-        manager.sendHeartbeat(dn.getProtoBufMessage(), null);
+        manager.sendHeartbeat(dn, null);
       }
       Thread.sleep(sleepDuration);
     }
@@ -775,7 +775,7 @@ public class TestNodeManager {
       // No Thread just one time HBs the node manager, so that these will be
       // marked as dead nodes eventually.
       for (DatanodeDetails dn : deadNodeList) {
-        nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null);
+        nodeManager.sendHeartbeat(dn, null);
       }
 
 
@@ -940,7 +940,7 @@ public class TestNodeManager {
       DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
           nodeManager);
       nodeManager.sendHeartbeat(
-          datanodeDetails.getProtoBufMessage(), null);
+          datanodeDetails, null);
       String status = nodeManager.getChillModeStatus();
       Assert.assertThat(status, containsString("Still in chill " +
           "mode, waiting on nodes to report in."));
@@ -967,8 +967,7 @@ public class TestNodeManager {
       // Assert that node manager force enter cannot be overridden by nodes HBs.
       for (int x = 0; x < 20; x++) {
         DatanodeDetails datanode = TestUtils.getDatanodeDetails(nodeManager);
-        nodeManager.sendHeartbeat(datanode.getProtoBufMessage(),
-            null);
+        nodeManager.sendHeartbeat(datanode, null);
       }
 
       Thread.sleep(500);
@@ -1009,10 +1008,10 @@ public class TestNodeManager {
         String dnId = datanodeDetails.getUuidString();
         long free = capacity - used;
         String storagePath = testDir.getAbsolutePath() + "/" + dnId;
-        List<SCMStorageReport> reports = TestUtils
+        List<StorageReportProto> reports = TestUtils
             .createStorageReport(capacity, used, free, storagePath,
                 null, dnId, 1);
-        nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+        nodeManager.sendHeartbeat(datanodeDetails,
             TestUtils.createNodeReport(reports));
       }
       GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
@@ -1058,11 +1057,11 @@ public class TestNodeManager {
         long scmUsed = x * usedPerHeartbeat;
         long remaining = capacity - scmUsed;
         String storagePath = testDir.getAbsolutePath() + "/" + dnId;
-        List<SCMStorageReport> reports = TestUtils
+        List<StorageReportProto> reports = TestUtils
             .createStorageReport(capacity, scmUsed, remaining, storagePath,
                 null, dnId, 1);
 
-        nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+        nodeManager.sendHeartbeat(datanodeDetails,
             TestUtils.createNodeReport(reports));
         Thread.sleep(100);
       }
@@ -1140,10 +1139,10 @@ public class TestNodeManager {
 
       // Send a new report to bring the dead node back to healthy
       String storagePath = testDir.getAbsolutePath() + "/" + dnId;
-      List<SCMStorageReport> reports = TestUtils
+      List<StorageReportProto> reports = TestUtils
           .createStorageReport(capacity, expectedScmUsed, expectedRemaining,
               storagePath, null, dnId, 1);
-      nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+      nodeManager.sendHeartbeat(datanodeDetails,
           TestUtils.createNodeReport(reports));
 
       // Wait up to 5 seconds so that the dead node becomes healthy
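
A note on the mechanical change running through this test: SCMNodeManager.sendHeartbeat now takes the DatanodeDetails object directly instead of its protobuf message, and storage reports move from SCMStorageReport to StorageReportProto. A minimal sketch of the new call shape, reusing the TestUtils helpers from this test (the capacity/used/free values are illustrative only):

    // Sketch, not part of the commit: heartbeat one datanode under the
    // new signature.
    DatanodeDetails dn = TestUtils.getDatanodeDetails(nodeManager);
    String dnId = dn.getUuidString();
    String storagePath = testDir.getAbsolutePath() + "/" + dnId;
    List<StorageReportProto> reports = TestUtils
        .createStorageReport(100, 10, 90, storagePath, null, dnId, 1);
    // DatanodeDetails is passed as-is; no dn.getProtoBufMessage() needed.
    nodeManager.sendHeartbeat(dn, TestUtils.createNodeReport(reports));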

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
index b824412..072dee7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
@@ -21,9 +21,9 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+    StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+    StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -134,7 +134,7 @@ public class TestSCMNodeStorageStatMap {
   @Test
   public void testProcessNodeReportCheckOneNode() throws IOException {
     UUID key = getFirstKey();
-    List<SCMStorageReport> reportList = new ArrayList<>();
+    List<StorageReportProto> reportList = new ArrayList<>();
     Set<StorageLocationReport> reportSet = testData.get(key);
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
     map.insertNewDatanode(key, reportSet);
@@ -146,16 +146,16 @@ public class TestSCMNodeStorageStatMap {
     long reportCapacity = report.getCapacity();
     long reportScmUsed = report.getScmUsed();
     long reportRemaining = report.getRemaining();
-    List<SCMStorageReport> reports = TestUtils
+    List<StorageReportProto> reports = TestUtils
         .createStorageReport(reportCapacity, reportScmUsed, reportRemaining,
             path, null, storageId, 1);
     StorageReportResult result =
         map.processNodeReport(key, TestUtils.createNodeReport(reports));
     Assert.assertEquals(result.getStatus(),
         SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
-    StorageContainerDatanodeProtocolProtos.SCMNodeReport.Builder nrb =
-        SCMNodeReport.newBuilder();
-    SCMStorageReport srb = reportSet.iterator().next().getProtoBufMessage();
+    StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb =
+        NodeReportProto.newBuilder();
+    StorageReportProto srb = reportSet.iterator().next().getProtoBufMessage();
     reportList.add(srb);
     result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
     Assert.assertEquals(result.getStatus(),
@@ -168,7 +168,7 @@ public class TestSCMNodeStorageStatMap {
     Assert.assertEquals(result.getStatus(),
         SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE);
     // Mark a disk failed 
-    SCMStorageReport srb2 = SCMStorageReport.newBuilder()
+    StorageReportProto srb2 = StorageReportProto.newBuilder()
         .setStorageUuid(UUID.randomUUID().toString())
         .setStorageLocation(srb.getStorageLocation()).setScmUsed(reportCapacity)
         .setCapacity(reportCapacity).setRemaining(0).setFailed(true).build();
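
The renames in this file are purely mechanical: SCMNodeReport becomes NodeReportProto and SCMStorageReport becomes StorageReportProto. A minimal sketch of building a failed-disk report under the new names, assuming the same TestUtils helper used above (field values are illustrative):

    // Sketch only: a StorageReportProto marking one volume failed, wrapped
    // into a NodeReportProto via the test helper.
    List<StorageReportProto> reportList = new ArrayList<>();
    StorageReportProto failedDisk = StorageReportProto.newBuilder()
        .setStorageUuid(UUID.randomUUID().toString())
        .setStorageLocation("/data/disk1")
        .setCapacity(100L).setScmUsed(100L).setRemaining(0L)
        .setFailed(true).build();
    reportList.add(failedDisk);
    NodeReportProto nodeReport = TestUtils.createNodeReport(reportList);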

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 1d92cdc..34779da 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -20,22 +20,21 @@ import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.VersionInfo;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
+    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
+    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
 import org.apache.hadoop.ipc.RPC;
@@ -200,7 +199,7 @@ public class TestEndPoint {
     DatanodeDetails nodeToRegister = getDatanodeDetails();
     try (EndpointStateMachine rpcEndPoint = createEndpoint(
         SCMTestUtils.getConf(), serverAddress, 1000)) {
-      SCMRegisteredCmdResponseProto responseProto = rpcEndPoint.getEndPoint()
+      SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint()
           .register(nodeToRegister.getProtoBufMessage(), TestUtils
                   .createNodeReport(
                       getStorageReports(nodeToRegister.getUuidString())),
@@ -215,7 +214,7 @@ public class TestEndPoint {
     }
   }
 
-  private List<SCMStorageReport> getStorageReports(String id) {
+  private List<StorageReportProto> getStorageReports(String id) {
     String storagePath = testDir.getAbsolutePath() + "/" + id;
     return TestUtils.createStorageReport(100, 10, 90, storagePath, null, id, 1);
   }
@@ -293,9 +292,14 @@ public class TestEndPoint {
              createEndpoint(SCMTestUtils.getConf(),
                  serverAddress, 1000)) {
       String storageId = UUID.randomUUID().toString();
+      SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
+          .setDatanodeDetails(dataNode.getProtoBufMessage())
+          .setNodeReport(TestUtils.createNodeReport(
+              getStorageReports(storageId)))
+          .build();
+
       SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .sendHeartbeat(dataNode.getProtoBufMessage(),
-              TestUtils.createNodeReport(getStorageReports(storageId)));
+          .sendHeartbeat(request);
       Assert.assertNotNull(responseProto);
       Assert.assertEquals(0, responseProto.getCommandsCount());
     }
@@ -361,86 +365,11 @@ public class TestEndPoint {
         lessThanOrEqualTo(rpcTimeout + tolerance));
   }
 
-  /**
-   * Returns a new container report.
-   * @return
-   */
-  ContainerReport getRandomContainerReport() {
-    return new ContainerReport(RandomUtils.nextLong(),
-        DigestUtils.sha256Hex("Random"));
-  }
-
-  /**
-   * Creates dummy container reports.
-   * @param count - The number of closed containers to create.
-   * @return ContainerReportsProto
-   */
-  StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto
-      createDummyContainerReports(int count) {
-    StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto.Builder
-        reportsBuilder = StorageContainerDatanodeProtocolProtos
-        .ContainerReportsRequestProto.newBuilder();
-    for (int x = 0; x < count; x++) {
-      reportsBuilder.addReports(getRandomContainerReport()
-          .getProtoBufMessage());
-    }
-    reportsBuilder.setDatanodeDetails(getDatanodeDetails()
-        .getProtoBufMessage());
-    reportsBuilder.setType(StorageContainerDatanodeProtocolProtos
-        .ContainerReportsRequestProto.reportType.fullReport);
-    return reportsBuilder.build();
-  }
-
-  /**
-   * Tests that rpcEndpoint sendContainerReport works as expected.
-   * @throws Exception
-   */
-  @Test
-  public void testContainerReportSend() throws Exception {
-    final int count = 1000;
-    scmServerImpl.reset();
-    try (EndpointStateMachine rpcEndPoint =
-             createEndpoint(SCMTestUtils.getConf(),
-                 serverAddress, 1000)) {
-      ContainerReportsResponseProto responseProto = rpcEndPoint
-          .getEndPoint().sendContainerReport(createDummyContainerReports(
-              count));
-      Assert.assertNotNull(responseProto);
-    }
-    Assert.assertEquals(1, scmServerImpl.getContainerReportsCount());
-    Assert.assertEquals(count, scmServerImpl.getContainerCount());
-  }
-
-
-  /**
-   * Tests that rpcEndpoint sendContainerReport works as expected.
-   * @throws Exception
-   */
-  @Test
-  public void testContainerReport() throws Exception {
-    final int count = 1000;
-    scmServerImpl.reset();
-    try (EndpointStateMachine rpcEndPoint =
-             createEndpoint(SCMTestUtils.getConf(),
-                 serverAddress, 1000)) {
-      ContainerReportsResponseProto responseProto = rpcEndPoint
-          .getEndPoint().sendContainerReport(createContainerReport(count,
-              null));
-      Assert.assertNotNull(responseProto);
-    }
-    Assert.assertEquals(1, scmServerImpl.getContainerReportsCount());
-    Assert.assertEquals(count, scmServerImpl.getContainerCount());
-    final long expectedKeyCount = count * 1000;
-    Assert.assertEquals(expectedKeyCount, scmServerImpl.getKeyCount());
-    final long expectedBytesUsed = count * OzoneConsts.GB * 2;
-    Assert.assertEquals(expectedBytesUsed, scmServerImpl.getBytesUsed());
-  }
-
-  private ContainerReportsRequestProto createContainerReport(
+  private ContainerReportsProto createContainerReport(
       int count, DatanodeDetails datanodeDetails) {
-    StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto.Builder
+    StorageContainerDatanodeProtocolProtos.ContainerReportsProto.Builder
         reportsBuilder = StorageContainerDatanodeProtocolProtos
-        .ContainerReportsRequestProto.newBuilder();
+        .ContainerReportsProto.newBuilder();
     for (int x = 0; x < count; x++) {
       long containerID = RandomUtils.nextLong();
       ContainerReport report = new ContainerReport(containerID,
@@ -455,14 +384,6 @@ public class TestEndPoint {
 
       reportsBuilder.addReports(report.getProtoBufMessage());
     }
-    if(datanodeDetails == null) {
-      reportsBuilder.setDatanodeDetails(getDatanodeDetails()
-          .getProtoBufMessage());
-    } else {
-      reportsBuilder.setDatanodeDetails(datanodeDetails.getProtoBufMessage());
-    }
-    reportsBuilder.setType(StorageContainerDatanodeProtocolProtos
-        .ContainerReportsRequestProto.reportType.fullReport);
     return reportsBuilder.build();
   }
 }
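
The heartbeat RPC itself changes shape in this file: instead of passing a datanode proto and a node report as two arguments, callers now build a single SCMHeartbeatRequestProto. A minimal sketch of the new round trip, assuming the rpcEndPoint and getStorageReports helper defined above:

    // Sketch only: one-message heartbeat request and its response.
    SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
        .setDatanodeDetails(dataNode.getProtoBufMessage())
        .setNodeReport(TestUtils.createNodeReport(
            getStorageReports(UUID.randomUUID().toString())))
        .build();
    SCMHeartbeatResponseProto response =
        rpcEndPoint.getEndPoint().sendHeartbeat(request);
    Assert.assertEquals(0, response.getCommandsCount());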

http://git-wip-us.apache.org/repos/asf/hadoop/blob/201440b9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
deleted file mode 100644
index e197886..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
+++ /dev/null
@@ -1,275 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.replication.ContainerSupervisor;
-import org.apache.hadoop.hdds.scm.container.replication.InProgressPool;
-import org.apache.hadoop.hdds.scm.node.CommandQueue;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.hadoop.ozone.container.testutils
-    .ReplicationDatanodeStateManager;
-import org.apache.hadoop.ozone.container.testutils.ReplicationNodeManagerMock;
-import org.apache.hadoop.ozone.container.testutils
-    .ReplicationNodePoolManagerMock;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.event.Level;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .HEALTHY;
-import static org.apache.ratis.shaded.com.google.common.util.concurrent
-    .Uninterruptibles.sleepUninterruptibly;
-
-/**
- * Tests for the container manager.
- */
-public class TestContainerSupervisor {
-  final static String POOL_NAME_TEMPLATE = "Pool%d";
-  static final int MAX_DATANODES = 72;
-  static final int POOL_SIZE = 24;
-  static final int POOL_COUNT = 3;
-  private LogCapturer logCapturer = LogCapturer.captureLogs(
-      LogFactory.getLog(ContainerSupervisor.class));
-  private List<DatanodeDetails> datanodes = new LinkedList<>();
-  private NodeManager nodeManager;
-  private NodePoolManager poolManager;
-  private CommandQueue commandQueue;
-  private ContainerSupervisor containerSupervisor;
-  private ReplicationDatanodeStateManager datanodeStateManager;
-
-  @After
-  public void tearDown() throws Exception {
-    logCapturer.stopCapturing();
-    GenericTestUtils.setLogLevel(ContainerSupervisor.LOG, Level.INFO);
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    GenericTestUtils.setLogLevel(ContainerSupervisor.LOG, Level.DEBUG);
-    Map<DatanodeDetails, NodeState> nodeStateMap = new HashMap<>();
-    // We are setting up 3 pools with 24 nodes each in this cluster.
-    // First we create 72 Datanodes.
-    for (int x = 0; x < MAX_DATANODES; x++) {
-      DatanodeDetails datanode = TestUtils.getDatanodeDetails();
-      datanodes.add(datanode);
-      nodeStateMap.put(datanode, HEALTHY);
-    }
-
-    commandQueue = new CommandQueue();
-
-    // All nodes in this cluster are healthy for time being.
-    nodeManager = new ReplicationNodeManagerMock(nodeStateMap, commandQueue);
-    poolManager = new ReplicationNodePoolManagerMock();
-
-
-    Assert.assertEquals("Max datanodes should be equal to POOL_SIZE * " +
-        "POOL_COUNT", POOL_COUNT * POOL_SIZE, MAX_DATANODES);
-
-    // Start from 1 instead of zero so we can multiply and get the node index.
-    for (int y = 1; y <= POOL_COUNT; y++) {
-      String poolName = String.format(POOL_NAME_TEMPLATE, y);
-      for (int z = 0; z < POOL_SIZE; z++) {
-        DatanodeDetails id = datanodes.get(y * z);
-        poolManager.addNode(poolName, id);
-      }
-    }
-    OzoneConfiguration config = SCMTestUtils.getOzoneConf();
-    config.setTimeDuration(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT, 2,
-        TimeUnit.SECONDS);
-    config.setTimeDuration(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL, 1,
-        TimeUnit.SECONDS);
-    containerSupervisor = new ContainerSupervisor(config,
-        nodeManager, poolManager);
-    datanodeStateManager = new ReplicationDatanodeStateManager(nodeManager,
-        poolManager);
-    // Sleep for one second to make sure all threads get time to run.
-    sleepUninterruptibly(1, TimeUnit.SECONDS);
-  }
-
-  @Test
-  /**
-   * Asserts that at least one pool is picked up for processing.
-   */
-  public void testAssertPoolsAreProcessed() {
-    // This asserts that replication manager has started processing at least
-    // one pool.
-    Assert.assertTrue(containerSupervisor.getInProgressPoolCount() > 0);
-
-    // Since all datanodes are flagged as healthy in this test, for each
-    // datanode we must have queued a command.
-    Assert.assertEquals("Commands are in queue :",
-        POOL_SIZE * containerSupervisor.getInProgressPoolCount(),
-        commandQueue.getCommandsInQueue());
-  }
-
-  @Test
-  /**
-   * This test sends container reports for 2 containers to a pool in progress.
-   * Asserts that we are able to find a container with single replica and do
-   * not find container with 3 replicas.
-   */
-  public void testDetectSingleContainerReplica() throws TimeoutException,
-      InterruptedException {
-    long singleNodeContainerID = 9001;
-    long threeNodeContainerID = 9003;
-    InProgressPool ppool = containerSupervisor.getInProcessPoolList().get(0);
-    // Only single datanode reporting that "SingleNodeContainer" exists.
-    List<ContainerReportsRequestProto> clist =
-        datanodeStateManager.getContainerReport(singleNodeContainerID,
-            ppool.getPool().getPoolName(), 1);
-    ppool.handleContainerReport(clist.get(0));
-
-    // Three nodes are going to report that ThreeNodeContainer  exists.
-    clist = datanodeStateManager.getContainerReport(threeNodeContainerID,
-        ppool.getPool().getPoolName(), 3);
-
-    for (ContainerReportsRequestProto reportsProto : clist) {
-      ppool.handleContainerReport(reportsProto);
-    }
-    GenericTestUtils.waitFor(() -> ppool.getContainerProcessedCount() == 4,
-        200, 1000);
-    ppool.setDoneProcessing();
-
-    List<Map.Entry<Long, Integer>> containers = ppool.filterContainer(p -> p
-        .getValue() == 1);
-    Assert.assertEquals(singleNodeContainerID,
-        containers.get(0).getKey().longValue());
-    int count = containers.get(0).getValue();
-    Assert.assertEquals(1L, count);
-  }
-
-  @Test
-  /**
-   * We create three containers, Normal,OveReplicated and WayOverReplicated
-   * containers. This test asserts that we are able to find the
-   * over replicated containers.
-   */
-  public void testDetectOverReplica() throws TimeoutException,
-      InterruptedException {
-    long normalContainerID = 9000;
-    long overReplicatedContainerID = 9001;
-    long wayOverReplicatedContainerID = 9002;
-    InProgressPool ppool = containerSupervisor.getInProcessPoolList().get(0);
-
-    List<ContainerReportsRequestProto> clist =
-        datanodeStateManager.getContainerReport(normalContainerID,
-            ppool.getPool().getPoolName(), 3);
-    ppool.handleContainerReport(clist.get(0));
-
-    clist = datanodeStateManager.getContainerReport(overReplicatedContainerID,
-        ppool.getPool().getPoolName(), 4);
-
-    for (ContainerReportsRequestProto reportsProto : clist) {
-      ppool.handleContainerReport(reportsProto);
-    }
-
-    clist = datanodeStateManager.getContainerReport(
-        wayOverReplicatedContainerID, ppool.getPool().getPoolName(), 7);
-
-    for (ContainerReportsRequestProto reportsProto : clist) {
-      ppool.handleContainerReport(reportsProto);
-    }
-
-    // We ignore container reports from the same datanodes.
-    // it is possible that these each of these containers get placed
-    // on same datanodes, so allowing for 4 duplicates in the set of 14.
-    GenericTestUtils.waitFor(() -> ppool.getContainerProcessedCount() > 10,
-        200, 1000);
-    ppool.setDoneProcessing();
-
-    List<Map.Entry<Long, Integer>> containers = ppool.filterContainer(p -> p
-        .getValue() > 3);
-    Assert.assertEquals(2, containers.size());
-  }
-
-  @Test
-  /**
-   * This test verifies that all pools are picked up for replica processing.
-   *
-   */
-  public void testAllPoolsAreProcessed() throws TimeoutException,
-      InterruptedException {
-    // Verify that we saw all three pools being picked up for processing.
-    GenericTestUtils.waitFor(() -> containerSupervisor.getPoolProcessCount()
-        >= 3, 200, 15 * 1000);
-    Assert.assertTrue(logCapturer.getOutput().contains("Pool1") &&
-        logCapturer.getOutput().contains("Pool2") &&
-        logCapturer.getOutput().contains("Pool3"));
-  }
-
-  @Test
-  /**
-   * Adds a new pool and tests that we are able to pick up that new pool for
-   * processing as well as handle container reports for datanodes in that pool.
-   * @throws TimeoutException
-   * @throws InterruptedException
-   */
-  public void testAddingNewPoolWorks()
-      throws TimeoutException, InterruptedException, IOException {
-    LogCapturer inProgressLog = LogCapturer.captureLogs(
-        LogFactory.getLog(InProgressPool.class));
-    GenericTestUtils.setLogLevel(InProgressPool.LOG, Level.DEBUG);
-    try {
-      DatanodeDetails id = TestUtils.getDatanodeDetails();
-      ((ReplicationNodeManagerMock) (nodeManager)).addNode(id, HEALTHY);
-      poolManager.addNode("PoolNew", id);
-      GenericTestUtils.waitFor(() ->
-              logCapturer.getOutput().contains("PoolNew"),
-          200, 15 * 1000);
-
-      long newContainerID = 7001;
-      // Assert that we are able to send a container report to this new
-      // pool and datanode.
-      List<ContainerReportsRequestProto> clist =
-          datanodeStateManager.getContainerReport(newContainerID,
-              "PoolNew", 1);
-      containerSupervisor.handleContainerReport(clist.get(0));
-      GenericTestUtils.waitFor(() ->
-          inProgressLog.getOutput()
-              .contains(Long.toString(newContainerID)) && inProgressLog
-              .getOutput().contains(id.getUuidString()),
-          200, 10 * 1000);
-    } finally {
-      inProgressLog.stopCapturing();
-    }
-  }
-}




[03/50] [abbrv] hadoop git commit: HDFS-13618. Fix TestDataNodeFaultInjector test failures on Windows. Contributed by Xiao Liang.

Posted by xy...@apache.org.
HDFS-13618. Fix TestDataNodeFaultInjector test failures on Windows. Contributed by Xiao Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e0d4b1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e0d4b1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e0d4b1c

Branch: refs/heads/HDDS-4
Commit: 1e0d4b1c283fb98a95c60a1723f594befb3c18a9
Parents: 02322de
Author: Inigo Goiri <in...@apache.org>
Authored: Fri May 25 09:10:32 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri May 25 09:14:28 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e0d4b1c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
index 1507844..4afacd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
@@ -118,7 +118,7 @@ public class TestDataNodeFaultInjector {
       final MetricsDataNodeFaultInjector mdnFaultInjector) throws Exception {
 
     final Path baseDir = new Path(
-        PathUtils.getTestDir(getClass()).getAbsolutePath(),
+        PathUtils.getTestDir(getClass()).getPath(),
         GenericTestUtils.getMethodName());
     final DataNodeFaultInjector oldDnInjector = DataNodeFaultInjector.get();
     DataNodeFaultInjector.set(mdnFaultInjector);
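
The one-line fix swaps File#getAbsolutePath() for File#getPath() when building the test's base Path. A plausible reading of why this matters on Windows (hedged; the JIRA has the authoritative detail): an absolute Windows path starts with a drive spec such as C:\, whose colon and backslashes can be misparsed by org.apache.hadoop.fs.Path as a URI scheme, while the relative form returned by getPath() stays well-formed:

    // Sketch only: the two forms side by side, inside the test class.
    java.io.File dir = PathUtils.getTestDir(getClass());
    Path ok = new Path(dir.getPath(), GenericTestUtils.getMethodName());
    // new Path(dir.getAbsolutePath(), ...) would yield e.g. "C:\...\target\..."
    // on Windows, which Path may misparse as carrying a URI scheme.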




[46/50] [abbrv] hadoop git commit: HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.

Posted by xy...@apache.org.
HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9086e1fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9086e1fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9086e1fb

Branch: refs/heads/HDDS-4
Commit: 9086e1fb54e7567dba3aeff8b9c62cf8dd56d9b2
Parents: 1361030
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed May 9 15:56:03 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu May 31 08:49:34 2018 -0700

----------------------------------------------------------------------
 .../authentication/util/KerberosUtil.java       |   2 +-
 .../conf/TestConfigurationFieldsBase.java       |   2 +
 .../java/org/apache/hadoop/hdds/HddsUtils.java  |  13 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  10 +-
 .../scm/protocol/ScmBlockLocationProtocol.java  |   3 +
 .../StorageContainerLocationProtocol.java       |   4 +
 .../protocolPB/ScmBlockLocationProtocolPB.java  |   6 +
 .../StorageContainerLocationProtocolPB.java     |   4 +
 .../apache/hadoop/ozone/OzoneConfigKeys.java    |   5 +
 .../common/src/main/resources/ozone-default.xml |  41 +++-
 .../StorageContainerDatanodeProtocol.java       |   4 +
 .../StorageContainerDatanodeProtocolPB.java     |   6 +
 .../scm/server/StorageContainerManager.java     |  49 ++++-
 .../StorageContainerManagerHttpServer.java      |   5 +-
 .../ozone/client/protocol/ClientProtocol.java   |   3 +
 hadoop-ozone/common/src/main/bin/start-ozone.sh |   7 +
 hadoop-ozone/common/src/main/bin/stop-ozone.sh  |  13 +-
 hadoop-ozone/integration-test/pom.xml           |   6 +
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |  17 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java    | 205 +++++++++++++++++++
 .../ozone/TestStorageContainerManager.java      |   4 +-
 21 files changed, 368 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
index c011045..4459928 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
@@ -167,7 +167,7 @@ public class KerberosUtil {
   }
 
   /* Return fqdn of the current host */
-  static String getLocalHostName() throws UnknownHostException {
+  public static String getLocalHostName() throws UnknownHostException {
     return InetAddress.getLocalHost().getCanonicalHostName();
   }
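
Widening getLocalHostName to public lets code outside the package resolve the local FQDN, which Kerberos needs for _HOST substitution in principals. A one-line usage sketch (it throws UnknownHostException, so callers must handle that):

    // Sketch only: resolve the local FQDN via the now-public helper.
    String fqdn = KerberosUtil.getLocalHostName();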
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index 7f27d7d..c20733d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -436,6 +436,8 @@ public abstract class TestConfigurationFieldsBase {
     // Create XML key/value map
     LOG_XML.debug("Reading XML property files\n");
     xmlKeyValueMap = extractPropertiesFromXml(xmlFilename);
+    // Remove hadoop property set in ozone-default.xml
+    xmlKeyValueMap.remove("hadoop.custom.tags");
     LOG_XML.debug("\n=====\n");
 
     // Create default configuration variable key/value map

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 48c6dce..17c99bb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -241,18 +241,7 @@ public final class HddsUtils {
   }
 
   public static boolean isHddsEnabled(Configuration conf) {
-    String securityEnabled =
-        conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
-            "simple");
-    boolean securityAuthorizationEnabled = conf.getBoolean(
-        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false);
-
-    if (securityEnabled.equals("kerberos") || securityAuthorizationEnabled) {
-      LOG.error("Ozone is not supported in a security enabled cluster. ");
-      return false;
-    } else {
-      return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT);
-    }
+    return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT);
   }
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 85407e6..ba8f310 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -132,8 +132,9 @@ public final class ScmConfigKeys {
       "ozone.scm.http-address";
   public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
       "ozone.scm.https-address";
-  public static final String OZONE_SCM_KEYTAB_FILE =
-      "ozone.scm.keytab.file";
+  public static final String OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY =
+      "ozone.scm.kerberos.keytab.file";
+  public static final String OZONE_SCM_KERBEROS_PRINCIPAL_KEY = "ozone.scm.kerberos.principal";
   public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
   public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
   public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
@@ -279,6 +280,11 @@ public final class ScmConfigKeys {
   public static final String OZONE_SCM_CONTAINER_CLOSE_THRESHOLD =
       "ozone.scm.container.close.threshold";
   public static final float OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
+
+  public static final String SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY =
+      "ozone.scm.web.authentication.kerberos.principal";
+  public static final String SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY =
+      "ozone.scm.web.authentication.kerberos.keytab";
   /**
    * Never constructed.
    */
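
Note the key rename here: ozone.scm.keytab.file gives way to ozone.scm.kerberos.keytab.file, paired with ozone.scm.kerberos.principal, plus separate SPNEGO keys for the web UI. A minimal sketch of wiring them programmatically (principal and keytab values are placeholders):

    // Sketch only: setting the new SCM Kerberos keys on a configuration.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
        "scm/_HOST@EXAMPLE.COM");
    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
        "/etc/security/keytabs/scm.keytab");
    conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
        "HTTP/_HOST@EXAMPLE.COM");
    conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
        "/etc/security/keytabs/HTTP.keytab");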

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
index c8d4a80..e17f1c2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdds.scm.protocol;
 
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -31,6 +33,7 @@ import java.util.List;
  * ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes
  * to read/write a block.
  */
+@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface ScmBlockLocationProtocol {
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index e8d85e0..d36bdf3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -17,6 +17,8 @@
 
 package org.apache.hadoop.hdds.scm.protocol;
 
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
@@ -27,11 +29,13 @@ import org.apache.hadoop.hdds.protocol.proto
 import java.io.IOException;
 import java.util.EnumSet;
 import java.util.List;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * ContainerLocationProtocol is used by an HDFS node to find the set of nodes
  * that currently host a container.
  */
+@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface StorageContainerLocationProtocol {
   /**
    * Asks SCM where a container should be allocated. SCM responds with the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
index 837c95b..89bb066 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
@@ -18,9 +18,13 @@
 package org.apache.hadoop.hdds.scm.protocolPB;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
     .ScmBlockLocationProtocolService;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * Protocol used from an HDFS node to StorageContainerManager.  This extends the
@@ -30,6 +34,8 @@ import org.apache.hadoop.ipc.ProtocolInfo;
     "org.apache.hadoop.ozone.protocol.ScmBlockLocationProtocol",
     protocolVersion = 1)
 @InterfaceAudience.Private
+@KerberosInfo(
+    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface ScmBlockLocationProtocolPB
     extends ScmBlockLocationProtocolService.BlockingInterface {
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
index f234ad3..3bd83f9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
@@ -21,7 +21,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerLocationProtocolProtos
     .StorageContainerLocationProtocolService;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * Protocol used from an HDFS node to StorageContainerManager.  This extends the
@@ -30,6 +32,8 @@ import org.apache.hadoop.ipc.ProtocolInfo;
 @ProtocolInfo(protocolName =
     "org.apache.hadoop.ozone.protocol.StorageContainerLocationProtocol",
     protocolVersion = 1)
+@KerberosInfo(
+    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface StorageContainerLocationProtocolPB
     extends StorageContainerLocationProtocolService.BlockingInterface {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index d1377be..ac5d864 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -254,6 +254,11 @@ public final class OzoneConfigKeys {
       "hdds.datanode.storage.utilization.critical.threshold";
   public static final double
       HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT = 0.75;
+
+  public static final String OZONE_SECURITY_ENABLED_KEY = "ozone.security.enabled";
+  public static final String OZONE_SYSTEM_TAGS_KEY = "ozone.system.tags";
+  public static final boolean OZONE_SECURITY_ENABLED_DEFAULT = false;
+
   /**
    * There is no need to instantiate this class.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 7a91610..7012946 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -785,14 +785,6 @@
     </description>
   </property>
   <property>
-    <name>ozone.scm.keytab.file</name>
-    <value/>
-    <tag>OZONE, SECURITY</tag>
-    <description>
-      The keytab file for Kerberos authentication in SCM.
-    </description>
-  </property>
-  <property>
     <name>ozone.scm.max.container.report.threads</name>
     <value>100</value>
     <tag>OZONE, PERFORMANCE</tag>
@@ -1086,4 +1078,37 @@
     </description>
   </property>
 
+  <property>
+    <name>ozone.security.enabled</name>
+    <value>false</value>
+    <tag> OZONE, SECURITY, FLAG</tag>
+    <description>True if security is enabled for ozone. When this property is true, hadoop.security.authentication should be Kerberos.
+    </description>
+  </property>
+
+  <property>
+    <name>ozone.scm.kerberos.keytab.file</name>
+    <value></value>
+    <tag> OZONE, SECURITY</tag>
+    <description> The keytab file used by each SCM daemon to login as its
+      service principal. The principal name is configured with
+      ozone.scm.kerberos.principal.
+    </description>
+  </property>
+  <property>
+    <name>ozone.scm.kerberos.principal</name>
+    <value></value>
+    <tag> OZONE, SECURITY</tag>
+    <description>The SCM service principal, e.g. scm/_HOST@REALM.TLD.</description>
+  </property>
+
+  <property>
+    <name>ozone.scm.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+  </property>
+  <property>
+    <name>ozone.scm.web.authentication.kerberos.keytab</name>
+    <value>/etc/security/keytabs/HTTP.keytab</value>
+  </property>
+
 </configuration>
\ No newline at end of file
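
Per the new properties above, turning security on takes two switches: ozone.security.enabled=true and hadoop.security.authentication=kerberos. A minimal sketch in code (a real cluster would normally carry these in ozone-site.xml; values are placeholders):

    // Sketch only: minimal flags for a Kerberos-enabled SCM.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setBoolean(OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY, true);
    conf.set("hadoop.security.authentication", "kerberos");
    conf.set("ozone.scm.kerberos.principal", "scm/_HOST@EXAMPLE.COM");
    conf.set("ozone.scm.kerberos.keytab.file",
        "/etc/security/keytabs/scm.keytab");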

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
index a950a31..5b04c56 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
@@ -39,11 +39,15 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
 
 import java.io.IOException;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * The protocol spoken between datanodes and SCM. For specifics please the
  * Protoc file that defines this protocol.
  */
+@KerberosInfo(
+    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface StorageContainerDatanodeProtocol {
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
index 9b28b5a..9c32ef8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
@@ -19,7 +19,10 @@ package org.apache.hadoop.ozone.protocolPB;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos
     .StorageContainerDatanodeProtocolService;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * Protocol used from a datanode to StorageContainerManager.  This extends
@@ -29,6 +32,9 @@ import org.apache.hadoop.ipc.ProtocolInfo;
 @ProtocolInfo(protocolName =
     "org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol",
     protocolVersion = 1)
+@KerberosInfo(
+    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+    clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
 public interface StorageContainerDatanodeProtocolPB extends
     StorageContainerDatanodeProtocolService.BlockingInterface {
 }
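
The pattern repeated across these protocol interfaces is Hadoop RPC's declarative SASL wiring: a @KerberosInfo annotation names the configuration key holding the server principal (and, for the datanode protocol, the client principal), which the RPC layer resolves at connection time. A minimal sketch of the shape; the interface and method names below are made up for illustration:

    // Sketch only: MyScmFacingProtocol is a hypothetical interface.
    @KerberosInfo(
        serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
    @InterfaceAudience.Private
    public interface MyScmFacingProtocol {
      ScmInfo getScmInfo() throws IOException;
    }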

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 78f13cb..65619a4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -28,9 +28,11 @@ import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
 import com.google.protobuf.BlockingService;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.block.BlockManager;
 import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
@@ -52,6 +54,9 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
 import org.apache.hadoop.ozone.common.StorageInfo;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
@@ -70,6 +75,10 @@ import java.util.concurrent.TimeUnit;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
 /**
@@ -140,6 +149,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
    * Key = DatanodeUuid, value = ContainerStat.
    */
   private Cache<String, ContainerStat> containerReportCache;
+  private Configuration scmConf;
 
   /**
    * Creates a new StorageContainerManager. Configuration will be updated
@@ -148,13 +158,19 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
    *
    * @param conf configuration
    */
-  private StorageContainerManager(OzoneConfiguration conf) throws IOException {
+  private StorageContainerManager(OzoneConfiguration conf)
+      throws IOException, AuthenticationException {
 
     final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
         OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
-
+    this.scmConf = conf;
     StorageContainerManager.initMetrics();
     initContainerReportCache(conf);
+    // Authenticate SCM if security is enabled
+    if (this.scmConf.getBoolean(OZONE_SECURITY_ENABLED_KEY,
+        OZONE_SECURITY_ENABLED_DEFAULT)) {
+      loginAsSCMUser(this.scmConf);
+    }
 
     scmStorage = new SCMStorage(conf);
     if (scmStorage.getState() != StorageState.INITIALIZED) {
@@ -185,6 +201,33 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   }
 
   /**
+   * Login as the configured user for SCM.
+   *
+   * @param conf
+   */
+  private void loginAsSCMUser(Configuration conf)
+      throws IOException, AuthenticationException {
+    LOG.debug("Ozone security is enabled. Attempting login for SCM user. "
+            + "Principal: {}, keytab: {}", this.scmConf.get
+            (OZONE_SCM_KERBEROS_PRINCIPAL_KEY),
+        this.scmConf.get(OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY));
+
+    if (SecurityUtil.getAuthenticationMethod(conf).equals
+        (AuthenticationMethod.KERBEROS)) {
+      UserGroupInformation.setConfiguration(this.scmConf);
+      InetSocketAddress socAddr = HddsServerUtil
+          .getScmBlockClientBindAddress(conf);
+      SecurityUtil.login(conf, OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+          OZONE_SCM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
+    } else {
+      throw new AuthenticationException(SecurityUtil.getAuthenticationMethod
+          (conf) + " authentication method not support. "
+          + "SCM user login failed.");
+    }
+    LOG.info("SCM login successful.");
+  }
+
+  /**
    * Builds a message for logging startup information about an RPC server.
    *
    * @param description RPC server description
@@ -268,7 +311,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
 
   public static StorageContainerManager createSCM(String[] argv,
       OzoneConfiguration conf)
-      throws IOException {
+      throws IOException, AuthenticationException {
     if (!HddsUtils.isHddsEnabled(conf)) {
       System.err.println(
           "SCM cannot be started in secure mode or when " + OZONE_ENABLED + "" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
index 75b2036..da936ad 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdds.scm.server;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.server.BaseHttpServer;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 
 import java.io.IOException;
 
@@ -63,11 +62,11 @@ public class StorageContainerManagerHttpServer extends BaseHttpServer {
   }
 
   @Override protected String getKeytabFile() {
-    return ScmConfigKeys.OZONE_SCM_KEYTAB_FILE;
+    return ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY;
   }
 
   @Override protected String getSpnegoPrincipal() {
-    return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
+    return ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
   }
 
   @Override protected String getEnabledKey() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 94cc257..80b0a40 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.client.protocol;
 
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.client.BucketArgs;
 import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -33,6 +34,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 
 import java.io.IOException;
 import java.util.List;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * An implementer of this interface is capable of connecting to Ozone Cluster
@@ -42,6 +44,7 @@ import java.util.List;
  * includes: {@link org.apache.hadoop.ozone.client.rpc.RpcClient} for RPC and
  * {@link  org.apache.hadoop.ozone.client.rest.RestClient} for REST.
  */
+@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface ClientProtocol {
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-ozone/common/src/main/bin/start-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh b/hadoop-ozone/common/src/main/bin/start-ozone.sh
index 92bc4a8..55225a4 100644
--- a/hadoop-ozone/common/src/main/bin/start-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/start-ozone.sh
@@ -75,6 +75,13 @@ if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} ==
   exit 1
 fi
 
+#SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
+#SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
+#if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
+#  echo "Ozone is not supported in a security enabled cluster."
+#  exit 1
+#fi
+
 #---------------------------------------------------------
 # Check if ozone is enabled
 OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-ozone/common/src/main/bin/stop-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/stop-ozone.sh b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
index be55be4..ff332f2 100644
--- a/hadoop-ozone/common/src/main/bin/stop-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
@@ -47,13 +47,12 @@ else
   exit 1
 fi
 
-SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
-SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
-
-if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
-  echo "Ozone is not supported in a security enabled cluster."
-  exit 1
-fi
+#SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
+#SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
+#if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
+#  echo "Ozone is not supported in a security enabled cluster."
+#  exit 1
+#fi
 
 #---------------------------------------------------------
 # Check if ozone is enabled

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-ozone/integration-test/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml
index c8a932c..4aa1aa5 100644
--- a/hadoop-ozone/integration-test/pom.xml
+++ b/hadoop-ozone/integration-test/pom.xml
@@ -42,6 +42,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-objectstore-service</artifactId>
       <scope>provided</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index f0bfef1..fbd9565 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdds.scm.protocolPB
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import org.slf4j.Logger;
@@ -289,9 +290,16 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
     public MiniOzoneCluster build() throws IOException {
       DefaultMetricsSystem.setMiniClusterMode(true);
       initializeConfiguration();
-      StorageContainerManager scm = createSCM();
-      scm.start();
-      KeySpaceManager ksm = createKSM();
+      StorageContainerManager scm;
+      KeySpaceManager ksm;
+      try {
+        scm = createSCM();
+        scm.start();
+        ksm = createKSM();
+      } catch (AuthenticationException ex) {
+        throw new IOException("Unable to build MiniOzoneCluster. ", ex);
+      }
+
       ksm.start();
       List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(scm);
       hddsDatanodes.forEach((datanode) -> datanode.start(null));
@@ -318,7 +326,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
      *
      * @throws IOException
      */
-    private StorageContainerManager createSCM() throws IOException {
+    private StorageContainerManager createSCM()
+        throws IOException, AuthenticationException {
       configureSCM();
       SCMStorage scmStore = new SCMStorage(conf);
       scmStore.setClusterId(clusterId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
new file mode 100644
index 0000000..9c430ad
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Properties;
+import java.util.UUID;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.server.SCMStorage;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.KerberosAuthException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test class for security-enabled Ozone clusters.
+ */
+@InterfaceAudience.Private
+public final class TestSecureOzoneCluster {
+
+  private static final Logger LOGGER = LoggerFactory
+      .getLogger(TestSecureOzoneCluster.class);
+
+  private MiniKdc miniKdc;
+  private OzoneConfiguration conf;
+  private File workDir;
+  private static Properties securityProperties;
+  private File scmKeytab;
+  private File spnegoKeytab;
+  private String curUser;
+
+  @Before
+  public void init() {
+    try {
+      conf = new OzoneConfiguration();
+      startMiniKdc();
+      setSecureConfig(conf);
+      createCredentialsInKDC(conf, miniKdc);
+    } catch (IOException e) {
+      LOGGER.error("Failed to initialize TestSecureOzoneCluster", e);
+    } catch (Exception e) {
+      LOGGER.error("Failed to initialize TestSecureOzoneCluster", e);
+    }
+  }
+
+  private void createCredentialsInKDC(Configuration conf, MiniKdc miniKdc)
+      throws Exception {
+    createPrincipal(scmKeytab,
+        conf.get(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY));
+    createPrincipal(spnegoKeytab,
+        conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
+  }
+
+  private void createPrincipal(File keytab, String... principal)
+      throws Exception {
+    miniKdc.createPrincipal(keytab, principal);
+  }
+
+  private void startMiniKdc() throws Exception {
+    workDir = GenericTestUtils
+        .getTestDir(TestSecureOzoneCluster.class.getSimpleName());
+    securityProperties = MiniKdc.createConf();
+    miniKdc = new MiniKdc(securityProperties, workDir);
+    miniKdc.start();
+  }
+
+  private void setSecureConfig(Configuration conf) throws IOException {
+    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
+    String host = KerberosUtil.getLocalHostName();
+    String realm = miniKdc.getRealm();
+    curUser = UserGroupInformation.getCurrentUser()
+        .getUserName();
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+        "kerberos");
+    conf.set(OZONE_ADMINISTRATORS, curUser);
+
+    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+        "scm/" + host + "@" + realm);
+    conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+        "HTTP_SCM/" + host + "@" + realm);
+
+    scmKeytab = new File(workDir, "scm.keytab");
+    spnegoKeytab = new File(workDir, "http.keytab");
+
+    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+        scmKeytab.getAbsolutePath());
+    conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
+        spnegoKeytab.getAbsolutePath());
+
+  }
+
+  @Test
+  public void testSecureScmStartupSuccess() throws Exception {
+    final String path = GenericTestUtils
+        .getTempPath(UUID.randomUUID().toString());
+    Path scmPath = Paths.get(path, "scm-meta");
+    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    SCMStorage scmStore = new SCMStorage(conf);
+    String clusterId = UUID.randomUUID().toString();
+    String scmId = UUID.randomUUID().toString();
+    scmStore.setClusterId(clusterId);
+    scmStore.setScmId(scmId);
+    // writes the version file properties
+    scmStore.initialize();
+    StorageContainerManager scm = StorageContainerManager.createSCM(null, conf);
+    //Reads the SCM Info from SCM instance
+    ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
+    Assert.assertEquals(clusterId, scmInfo.getClusterId());
+    Assert.assertEquals(scmId, scmInfo.getScmId());
+  }
+
+  @Test
+  public void testSecureScmStartupFailure() throws Exception {
+    final String path = GenericTestUtils
+        .getTempPath(UUID.randomUUID().toString());
+    Path scmPath = Paths.get(path, "scm-meta");
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
+    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+        "scm@" + miniKdc.getRealm());
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+        "kerberos");
+
+    SCMStorage scmStore = new SCMStorage(conf);
+    String clusterId = UUID.randomUUID().toString();
+    String scmId = UUID.randomUUID().toString();
+    scmStore.setClusterId(clusterId);
+    scmStore.setScmId(scmId);
+    // writes the version file properties
+    scmStore.initialize();
+    LambdaTestUtils.intercept(IOException.class,
+        "Running in secure mode, but config doesn't have a keytab",
+        () -> {
+          StorageContainerManager.createSCM(null, conf);
+        });
+
+    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+        "scm/_HOST@EXAMPLE.com");
+    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+        "/etc/security/keytabs/scm.keytab");
+
+    LambdaTestUtils.intercept(KerberosAuthException.class, "failure "
+            + "to login: for principal:",
+        () -> {
+          StorageContainerManager.createSCM(null, conf);
+        });
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+        "OAuth2");
+
+    LambdaTestUtils.intercept(IllegalArgumentException.class, "Invalid"
+            + " attribute value for hadoop.security.authentication of OAuth2",
+        () -> {
+          StorageContainerManager.createSCM(null, conf);
+        });
+
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+        "KERBEROS_SSL");
+    LambdaTestUtils.intercept(AuthenticationException.class,
+        "KERBEROS_SSL authentication method not support.",
+        () -> {
+          StorageContainerManager.createSCM(null, conf);
+        });
+
+  }
+
+}
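
As a usage aside, the MiniKdc-driven setup in the new test above follows the
usual pattern for Kerberized Hadoop tests. A hedged skeleton of that pattern
(hadoop-minikdc API; the host and workDir variables are assumed to exist, and
note the explicit stop, which the test above has no teardown for):

  import java.io.File;
  import java.util.Properties;
  import org.apache.hadoop.minikdc.MiniKdc;

  // Hypothetical minimal MiniKdc lifecycle, mirroring the test above.
  Properties kdcConf = MiniKdc.createConf();
  MiniKdc kdc = new MiniKdc(kdcConf, workDir);  // workDir: scratch directory
  kdc.start();
  File keytab = new File(workDir, "scm.keytab");
  kdc.createPrincipal(keytab, "scm/" + host + "@" + kdc.getRealm());
  // ... exercise Kerberos-dependent code here ...
  kdc.stop();                                   // shut the KDC down when done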

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9086e1fb/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 0c1d8f2..17cfc22 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.junit.Rule;
 import org.junit.Assert;
 import org.junit.Test;
@@ -407,7 +408,8 @@ public class TestStorageContainerManager {
   }
 
   @Test
-  public void testSCMInitializationFailure() throws IOException {
+  public void testSCMInitializationFailure()
+      throws IOException, AuthenticationException {
     OzoneConfiguration conf = new OzoneConfiguration();
     final String path =
         GenericTestUtils.getTempPath(UUID.randomUUID().toString());


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[41/50] [abbrv] hadoop git commit: MAPREDUCE-7098. Upgrade commons-lang version to 3.7 in hadoop-mapreduce-project

Posted by xy...@apache.org.
MAPREDUCE-7098. Upgrade commons-lang version to 3.7 in hadoop-mapreduce-project

Signed-off-by: Akira Ajisaka <aa...@apache.org>
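
For context, the commons-lang 2.x to commons-lang3 migration in this commit is
mostly a package rename, but a few of the touched APIs also changed name or
signature. A hedged sketch of exactly those changes (class and method names as
in the diffs below; the demo class itself is hypothetical):

  import org.apache.commons.lang3.NotImplementedException;
  import org.apache.commons.lang3.StringEscapeUtils;

  // Hypothetical demo of the lang -> lang3 renames this commit applies.
  public class Lang3MigrationDemo {
    static String escapeForJsTable(String raw) {
      // lang 2.x: escapeJavaScript(escapeHtml(raw))
      // lang3:    escapeEcmaScript(escapeHtml4(raw))
      return StringEscapeUtils.escapeEcmaScript(
          StringEscapeUtils.escapeHtml4(raw));
    }

    static void unsupported() {
      // lang3's NotImplementedException has no no-arg constructor,
      // so a message is required.
      throw new NotImplementedException("Code is not implemented");
    }
  }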


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1e2b809
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1e2b809
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1e2b809

Branch: refs/heads/HDDS-4
Commit: d1e2b8098078af4af31392ed7f2fa350a7d1c3b2
Parents: 02c4b89
Author: Takanobu Asanuma <ta...@yahoo-corp.jp>
Authored: Thu May 31 18:31:10 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu May 31 18:31:10 2018 +0900

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/util/StringUtils.java   |  2 +-
 .../java/org/apache/hadoop/util/TestStringUtils.java    |  2 +-
 .../mapreduce/v2/app/job/impl/TaskAttemptImpl.java      |  2 +-
 .../hadoop/mapreduce/v2/app/webapp/AppController.java   |  2 +-
 .../apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java | 10 +++++-----
 .../hadoop/mapreduce/v2/app/webapp/TasksBlock.java      |  6 +++---
 .../mapreduce/v2/app/webapp/TestAppController.java      |  2 +-
 .../hadoop/mapreduce/checkpoint/RandomNameCNS.java      |  2 +-
 .../jobhistory/HumanReadableHistoryViewerPrinter.java   |  2 +-
 .../lib/output/PathOutputCommitterFactory.java          |  2 +-
 .../apache/hadoop/mapreduce/security/TokenCache.java    |  2 +-
 .../java/org/apache/hadoop/mapreduce/tools/CLI.java     |  2 +-
 .../mapreduce/lib/partition/TestRehashPartitioner.java  |  2 +-
 .../hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java      |  8 ++++----
 .../hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java       | 10 +++++-----
 .../main/java/org/apache/hadoop/mapred/ClientCache.java |  2 +-
 .../org/apache/hadoop/mapred/ClientServiceDelegate.java |  2 +-
 .../java/org/apache/hadoop/mapred/NotRunningJob.java    | 12 ++++++------
 .../main/java/org/apache/hadoop/mapred/YARNRunner.java  |  2 +-
 .../java/org/apache/hadoop/mapred/TestJobCounters.java  |  2 +-
 .../hadoop/mapreduce/lib/db/TestDBOutputFormat.java     |  2 +-
 .../hadoop-mapreduce-client/pom.xml                     |  5 -----
 22 files changed, 39 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index ebe7013..33a2010 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -36,7 +36,7 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.apache.commons.lang.SystemUtils;
-import org.apache.commons.lang.time.FastDateFormat;
+import org.apache.commons.lang3.time.FastDateFormat;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 1f474f8..96a6482 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -40,7 +40,7 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Pattern;
 
-import org.apache.commons.lang.time.FastDateFormat;
+import org.apache.commons.lang3.time.FastDateFormat;
 import org.apache.hadoop.test.UnitTestcaseTimeLimit;
 import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 0c830f8..63e7456 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.mapreduce.v2.app.job.impl;
 
-import static org.apache.commons.lang.StringUtils.isEmpty;
+import static org.apache.commons.lang3.StringUtils.isEmpty;
 
 import java.io.IOException;
 import java.net.InetAddress;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
index ad5bc2a..405c5f2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
@@ -25,7 +25,7 @@ import java.net.URLDecoder;
 
 import javax.servlet.http.HttpServletResponse;
 
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
index bd7f7a9..944f65e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
@@ -27,7 +27,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
 import java.util.EnumSet;
 import java.util.Collection;
 
-import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.commons.lang3.StringEscapeUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
@@ -134,8 +134,8 @@ public class TaskPage extends AppView {
         .append(getAttemptId(taskId, ta)).append("\",\"")
         .append(progress).append("\",\"")
         .append(ta.getState().toString()).append("\",\"")
-        .append(StringEscapeUtils.escapeJavaScript(
-              StringEscapeUtils.escapeHtml(ta.getStatus()))).append("\",\"")
+        .append(StringEscapeUtils.escapeEcmaScript(
+              StringEscapeUtils.escapeHtml4(ta.getStatus()))).append("\",\"")
 
         .append(nodeHttpAddr == null ? "N/A" :
             "<a class='nodelink' href='" + MRWebAppUtil.getYARNWebappScheme() + nodeHttpAddr + "'>"
@@ -151,8 +151,8 @@ public class TaskPage extends AppView {
         .append(ta.getStartTime()).append("\",\"")
         .append(ta.getFinishTime()).append("\",\"")
         .append(ta.getElapsedTime()).append("\",\"")
-        .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
-          diag)));
+        .append(StringEscapeUtils.escapeEcmaScript(
+            StringEscapeUtils.escapeHtml4(diag)));
         if (enableUIActions) {
           attemptsTableData.append("\",\"");
           if (EnumSet.of(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
index 8d92dd3..a2d8fa9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
@@ -24,7 +24,7 @@ import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR_VALUE;
 
-import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.commons.lang3.StringEscapeUtils;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo;
@@ -103,8 +103,8 @@ public class TasksBlock extends HtmlBlock {
       .append(join(pct, '%')).append("'> ").append("<div class='")
       .append(C_PROGRESSBAR_VALUE).append("' style='")
       .append(join("width:", pct, '%')).append("'> </div> </div>\",\"")
-      .append(StringEscapeUtils.escapeJavaScript(
-              StringEscapeUtils.escapeHtml(info.getStatus()))).append("\",\"")
+      .append(StringEscapeUtils.escapeEcmaScript(
+              StringEscapeUtils.escapeHtml4(info.getStatus()))).append("\",\"")
 
       .append(info.getState()).append("\",\"")
       .append(info.getStartTime()).append("\",\"")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
index 3f685b0..ba5c430 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
@@ -22,7 +22,7 @@ import static org.mockito.Mockito.*;
 import java.io.IOException;
 import java.util.Iterator;
 
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.JobACL;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/checkpoint/RandomNameCNS.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/checkpoint/RandomNameCNS.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/checkpoint/RandomNameCNS.java
index 7387c1c..9f97309 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/checkpoint/RandomNameCNS.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/checkpoint/RandomNameCNS.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.mapreduce.checkpoint;
 
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
 
 /**
  * Simple naming service that generates a random checkpoint name.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
index 060ba24..e43cbd9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.mapreduce.jobhistory;
 
-import org.apache.commons.lang.time.FastDateFormat;
+import org.apache.commons.lang3.time.FastDateFormat;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.mapred.JobStatus;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java
index 7d214f2..f3f0dab 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java
@@ -23,7 +23,7 @@ import java.io.IOException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
index 1156c67..5917f54 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
@@ -22,7 +22,7 @@ import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
 
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
index ca59dba..4108146 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
@@ -31,7 +31,7 @@ import java.util.HashSet;
 import java.util.Arrays;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceAudience.Private;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestRehashPartitioner.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestRehashPartitioner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestRehashPartitioner.java
index d2048c1..d86bdd3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestRehashPartitioner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestRehashPartitioner.java
@@ -22,7 +22,7 @@ import static org.junit.Assert.*;
 import java.util.Arrays;
 import java.util.Collections;
 
-import org.apache.commons.lang.ArrayUtils;
+import org.apache.commons.lang3.ArrayUtils;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.NullWritable;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java
index ef563f6..1a2b2fe 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.mapreduce.v2.hs.webapp;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 
-import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.commons.lang3.StringEscapeUtils;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
@@ -83,11 +83,11 @@ public class HsJobsBlock extends HtmlBlock {
       .append(dateFormat.format(new Date(job.getFinishTime()))).append("\",\"")
       .append("<a href='").append(url("job", job.getId())).append("'>")
       .append(job.getId()).append("</a>\",\"")
-      .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
+      .append(StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(
         job.getName()))).append("\",\"")
-      .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
+      .append(StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(
         job.getUserName()))).append("\",\"")
-      .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
+      .append(StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(
         job.getQueueName()))).append("\",\"")
       .append(job.getState()).append("\",\"")
       .append(String.valueOf(job.getMapsTotal())).append("\",\"")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java
index c5117ed..e8e76d1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java
@@ -29,7 +29,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
 
 import java.util.Collection;
 
-import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.commons.lang3.StringEscapeUtils;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
@@ -147,8 +147,8 @@ public class HsTaskPage extends HsView {
         attemptsTableData.append("[\"")
         .append(getAttemptId(taskId, ta)).append("\",\"")
         .append(ta.getState()).append("\",\"")
-        .append(StringEscapeUtils.escapeJavaScript(
-              StringEscapeUtils.escapeHtml(ta.getStatus()))).append("\",\"")
+        .append(StringEscapeUtils.escapeEcmaScript(
+              StringEscapeUtils.escapeHtml4(ta.getStatus()))).append("\",\"")
 
         .append("<a class='nodelink' href='" + MRWebAppUtil.getYARNWebappScheme() + nodeHttpAddr + "'>")
         .append(nodeRackName + "/" + nodeHttpAddr + "</a>\",\"")
@@ -171,8 +171,8 @@ public class HsTaskPage extends HsView {
           .append(elapsedReduceTime).append("\",\"");
         }
           attemptsTableData.append(attemptElapsed).append("\",\"")
-          .append(StringEscapeUtils.escapeJavaScript(
-              StringEscapeUtils.escapeHtml(ta.getNote())))
+          .append(StringEscapeUtils.escapeEcmaScript(
+              StringEscapeUtils.escapeHtml4(ta.getNote())))
           .append("\"],\n");
       }
        //Remove the last comma and close off the array of arrays

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
index 8268d1e..f4babf9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
@@ -22,7 +22,7 @@ import java.io.IOException;
 import java.security.PrivilegedAction;
 import java.util.HashMap;
 import java.util.Map;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapreduce.JobID;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index 792e496..2c2ff1f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
@@ -28,7 +28,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.RPC;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
index bfdc6db..152c8af 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
@@ -23,7 +23,7 @@ import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.HashMap;
 
-import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse;
@@ -167,7 +167,7 @@ public class NotRunningJob implements MRClientProtocol {
   public GetTaskAttemptReportResponse getTaskAttemptReport(
       GetTaskAttemptReportRequest request) throws IOException {
     //not invoked by anybody
-    throw new NotImplementedException();
+    throw new NotImplementedException("Code is not implemented");
   }
 
   @Override
@@ -222,26 +222,26 @@ public class NotRunningJob implements MRClientProtocol {
   public GetDelegationTokenResponse getDelegationToken(
       GetDelegationTokenRequest request) throws IOException {
     /* Should not be invoked by anyone. */
-    throw new NotImplementedException();
+    throw new NotImplementedException("Code is not implemented");
   }
 
   @Override
   public RenewDelegationTokenResponse renewDelegationToken(
       RenewDelegationTokenRequest request) throws IOException {
     /* Should not be invoked by anyone. */
-    throw new NotImplementedException();
+    throw new NotImplementedException("Code is not implemented");
   }
 
   @Override
   public CancelDelegationTokenResponse cancelDelegationToken(
       CancelDelegationTokenRequest request) throws IOException {
     /* Should not be invoked by anyone. */
-    throw new NotImplementedException();
+    throw new NotImplementedException("Code is not implemented");
   }
 
   @Override
   public InetSocketAddress getConnectAddress() {
     /* Should not be invoked by anyone.  Normally used to set token service */
-    throw new NotImplementedException();
+    throw new NotImplementedException("Code is not implemented");
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
index 496ff10..7d33ed2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.mapred;
 
-import static org.apache.commons.lang.StringUtils.isEmpty;
+import static org.apache.commons.lang3.StringUtils.isEmpty;
 import static org.apache.hadoop.mapreduce.MRJobConfig.MR_AM_RESOURCE_PREFIX;
 
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobCounters.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobCounters.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobCounters.java
index 850c00a..18ef64d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobCounters.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobCounters.java
@@ -29,7 +29,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.StringTokenizer;
 
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
index e547c8a..cd972bb 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.mapreduce.lib.db;
 import java.io.IOException;
 import java.lang.reflect.Field;
 
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e2b809/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
index 324825f..430116b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
@@ -143,11 +143,6 @@
       <scope>provided</scope>
     </dependency>
     <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
       <groupId>commons-collections</groupId>
       <artifactId>commons-collections</artifactId>
       <scope>provided</scope>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[42/50] [abbrv] hadoop git commit: YARN-8367. Fix NPE in SingleConstraintAppPlacementAllocator when placement constraint in SchedulingRequest is null. Contributed by Weiwei Yang.

Posted by xy...@apache.org.
YARN-8367. Fix NPE in SingleConstraintAppPlacementAllocator when placement constraint in SchedulingRequest is null. Contributed by Weiwei Yang.
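
The shape of the fix, as a hedged sketch (the helper name validateConstraint
is hypothetical and stands in for the checks inlined in the diff below):

  // Before: constraint.getConstraintExpr() was called unconditionally and
  // threw NPE when the SchedulingRequest carried no placement constraint.
  // After: all constraint validation is guarded by a null check.
  PlacementConstraint constraint = schedulingRequest.getPlacementConstraint();
  if (constraint != null) {
    validateConstraint(constraint);  // SingleConstraint, node scope, anti-affinity
  }
  // A null constraint is now accepted as "no constraint to enforce".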


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6468071f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6468071f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6468071f

Branch: refs/heads/HDDS-4
Commit: 6468071f137e6d918a7b4799ad54558fa26b25ce
Parents: d1e2b80
Author: Weiwei Yang <ww...@apache.org>
Authored: Thu May 31 20:46:39 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Thu May 31 20:46:39 2018 +0800

----------------------------------------------------------------------
 .../SingleConstraintAppPlacementAllocator.java  | 187 ++++++++++---------
 ...estSchedulingRequestContainerAllocation.java |  84 +++++++++
 2 files changed, 182 insertions(+), 89 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6468071f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
index 1fc6bad..2b610f2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableSet;
 import org.apache.commons.collections.IteratorUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -238,110 +239,118 @@ public class SingleConstraintAppPlacementAllocator<N extends SchedulerNode>
           "Only GUARANTEED execution type is supported.");
     }
 
-    PlacementConstraint constraint =
-        newSchedulingRequest.getPlacementConstraint();
-
-    // We only accept SingleConstraint
-    PlacementConstraint.AbstractConstraint ac = constraint.getConstraintExpr();
-    if (!(ac instanceof PlacementConstraint.SingleConstraint)) {
-      throwExceptionWithMetaInfo(
-          "Only accepts " + PlacementConstraint.SingleConstraint.class.getName()
-              + " as constraint-expression. Rejecting the new added "
-              + "constraint-expression.class=" + ac.getClass().getName());
-    }
-
-    PlacementConstraint.SingleConstraint singleConstraint =
-        (PlacementConstraint.SingleConstraint) ac;
-
-    // Make sure it is an anti-affinity request (actually this implementation
-    // should be able to support both affinity / anti-affinity without much
-    // effort. Considering potential test effort required. Limit to
-    // anti-affinity to intra-app and scope is node.
-    if (!singleConstraint.getScope().equals(PlacementConstraints.NODE)) {
-      throwExceptionWithMetaInfo(
-          "Only support scope=" + PlacementConstraints.NODE
-              + "now. PlacementConstraint=" + singleConstraint);
-    }
-
-    if (singleConstraint.getMinCardinality() != 0
-        || singleConstraint.getMaxCardinality() != 0) {
-      throwExceptionWithMetaInfo(
-          "Only support anti-affinity, which is: minCardinality=0, "
-              + "maxCardinality=1");
-    }
-
-    Set<PlacementConstraint.TargetExpression> targetExpressionSet =
-        singleConstraint.getTargetExpressions();
-    if (targetExpressionSet == null || targetExpressionSet.isEmpty()) {
-      throwExceptionWithMetaInfo(
-          "TargetExpression should not be null or empty");
-    }
-
-    // Set node partition
+    // Node partition
     String nodePartition = null;
-
     // Target allocation tags
     Set<String> targetAllocationTags = null;
 
-    for (PlacementConstraint.TargetExpression targetExpression : targetExpressionSet) {
-      // Handle node partition
-      if (targetExpression.getTargetType().equals(
-          PlacementConstraint.TargetExpression.TargetType.NODE_ATTRIBUTE)) {
-        // For node attribute target, we only support Partition now. And once
-        // YARN-3409 is merged, we will support node attribute.
-        if (!targetExpression.getTargetKey().equals(NODE_PARTITION)) {
-          throwExceptionWithMetaInfo("When TargetType="
-              + PlacementConstraint.TargetExpression.TargetType.NODE_ATTRIBUTE
-              + " only " + NODE_PARTITION + " is accepted as TargetKey.");
-        }
+    PlacementConstraint constraint =
+        newSchedulingRequest.getPlacementConstraint();
 
-        if (nodePartition != null) {
-          // This means we have duplicated node partition entry inside placement
-          // constraint, which might be set by mistake.
-          throwExceptionWithMetaInfo(
-              "Only one node partition targetExpression is allowed");
-        }
+    if (constraint != null) {
+      // We only accept SingleConstraint
+      PlacementConstraint.AbstractConstraint ac = constraint
+          .getConstraintExpr();
+      if (!(ac instanceof PlacementConstraint.SingleConstraint)) {
+        throwExceptionWithMetaInfo("Only accepts "
+            + PlacementConstraint.SingleConstraint.class.getName()
+                + " as constraint-expression. Rejecting the new added "
+            + "constraint-expression.class=" + ac.getClass().getName());
+      }
 
-        Set<String> values = targetExpression.getTargetValues();
-        if (values == null || values.isEmpty()) {
-          nodePartition = RMNodeLabelsManager.NO_LABEL;
-          continue;
-        }
+      PlacementConstraint.SingleConstraint singleConstraint =
+          (PlacementConstraint.SingleConstraint) ac;
+
+      // Make sure it is an anti-affinity request. This implementation could
+      // support both affinity and anti-affinity without much effort, but
+      // considering the test effort required, we currently limit it to
+      // intra-app anti-affinity with node scope.
+      if (!singleConstraint.getScope().equals(PlacementConstraints.NODE)) {
+        throwExceptionWithMetaInfo(
+            "Only support scope=" + PlacementConstraints.NODE
+                + "now. PlacementConstraint=" + singleConstraint);
+      }
 
-        if (values.size() > 1) {
-          throwExceptionWithMetaInfo("Inside one targetExpression, we only "
-              + "support affinity to at most one node partition now");
-        }
+      if (singleConstraint.getMinCardinality() != 0
+          || singleConstraint.getMaxCardinality() != 0) {
+        throwExceptionWithMetaInfo(
+            "Only support anti-affinity, which is: minCardinality=0, "
+                + "maxCardinality=1");
+      }
 
-        nodePartition = values.iterator().next();
-      } else if (targetExpression.getTargetType().equals(
-          PlacementConstraint.TargetExpression.TargetType.ALLOCATION_TAG)) {
-        // Handle allocation tags
-        if (targetAllocationTags != null) {
-          // This means we have duplicated AllocationTag expressions entries
-          // inside placement constraint, which might be set by mistake.
-          throwExceptionWithMetaInfo(
-              "Only one AllocationTag targetExpression is allowed");
-        }
+      Set<PlacementConstraint.TargetExpression> targetExpressionSet =
+          singleConstraint.getTargetExpressions();
+      if (targetExpressionSet == null || targetExpressionSet.isEmpty()) {
+        throwExceptionWithMetaInfo(
+            "TargetExpression should not be null or empty");
+      }
 
-        if (targetExpression.getTargetValues() == null || targetExpression
-            .getTargetValues().isEmpty()) {
-          throwExceptionWithMetaInfo("Failed to find allocation tags from "
-              + "TargetExpressions or couldn't find self-app target.");
+      for (PlacementConstraint.TargetExpression targetExpression :
+          targetExpressionSet) {
+        // Handle node partition
+        if (targetExpression.getTargetType().equals(
+            PlacementConstraint.TargetExpression.TargetType.NODE_ATTRIBUTE)) {
+          // For a node attribute target, we only support the node partition
+          // for now. Once YARN-3409 is merged, we will support node attributes.
+          if (!targetExpression.getTargetKey().equals(NODE_PARTITION)) {
+            throwExceptionWithMetaInfo("When TargetType="
+                + PlacementConstraint.TargetExpression.TargetType.NODE_ATTRIBUTE
+                + " only " + NODE_PARTITION + " is accepted as TargetKey.");
+          }
+
+          if (nodePartition != null) {
+            // This means we have duplicated node partition entry
+            // inside placement constraint, which might be set by mistake.
+            throwExceptionWithMetaInfo(
+                "Only one node partition targetExpression is allowed");
+          }
+
+          Set<String> values = targetExpression.getTargetValues();
+          if (values == null || values.isEmpty()) {
+            nodePartition = RMNodeLabelsManager.NO_LABEL;
+            continue;
+          }
+
+          if (values.size() > 1) {
+            throwExceptionWithMetaInfo("Inside one targetExpression, we only "
+                + "support affinity to at most one node partition now");
+          }
+
+          nodePartition = values.iterator().next();
+        } else if (targetExpression.getTargetType().equals(
+            PlacementConstraint.TargetExpression.TargetType.ALLOCATION_TAG)) {
+          // Handle allocation tags
+          if (targetAllocationTags != null) {
+            // This means we have duplicated AllocationTag expressions entries
+            // inside placement constraint, which might be set by mistake.
+            throwExceptionWithMetaInfo(
+                "Only one AllocationTag targetExpression is allowed");
+          }
+
+          if (targetExpression.getTargetValues() == null ||
+              targetExpression.getTargetValues().isEmpty()) {
+            throwExceptionWithMetaInfo("Failed to find allocation tags from "
+                + "TargetExpressions or couldn't find self-app target.");
+          }
+
+          targetAllocationTags = new HashSet<>(
+              targetExpression.getTargetValues());
         }
+      }
 
-        targetAllocationTags = new HashSet<>(
-            targetExpression.getTargetValues());
+      if (targetAllocationTags == null) {
+        // That means we don't have ALLOCATION_TAG specified
+        throwExceptionWithMetaInfo(
+            "Couldn't find target expression with type == ALLOCATION_TAG,"
+                + " it is required to include one and only one target"
+                + " expression with type == ALLOCATION_TAG");
       }
     }
 
+    // If this scheduling request doesn't contain a placement constraint,
+    // we set the allocation tags to an empty set.
     if (targetAllocationTags == null) {
-      // That means we don't have ALLOCATION_TAG specified
-      throwExceptionWithMetaInfo(
-          "Couldn't find target expression with type == ALLOCATION_TAG, it is "
-              + "required to include one and only one target expression with "
-              + "type == ALLOCATION_TAG");
-
+      targetAllocationTags = ImmutableSet.of();
     }
 
     if (nodePartition == null) {
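
For reference, a minimal sketch (not part of the commit) of the two request
shapes this validator accepts after the change, mirroring the test added
below; imports and the surrounding AM/RM setup are elided:

  // Anti-affinity constraint: scope=node, minCardinality=0, maxCardinality=0.
  PlacementConstraint antiAffinity = PlacementConstraints
      .targetNotIn("node", allocationTag("t1"))
      .build();
  SchedulingRequest withConstraint = SchedulingRequest.newInstance(
      0, Priority.newInstance(1),
      ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED),
      ImmutableSet.of("t1"),
      ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)),
      antiAffinity);

  // A null constraint is now tolerated instead of failing with an NPE; the
  // allocator falls back to an empty allocation-tag set.
  SchedulingRequest withoutConstraint = SchedulingRequest.newInstance(
      1, Priority.newInstance(1),
      ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED),
      ImmutableSet.of("t2"),
      ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)),
      null);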

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6468071f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java
index 13247a7..f23fd8f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java
@@ -18,8 +18,15 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.TargetApplicationsNamespace;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -39,6 +46,8 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.*;
+
 public class TestSchedulingRequestContainerAllocation {
   private final int GB = 1024;
 
@@ -393,4 +402,79 @@ public class TestSchedulingRequestContainerAllocation {
     Assert.assertTrue(caughtException);
     rm1.close();
   }
+
+  @Test
+  public void testSchedulingRequestWithNullConstraint() throws Exception {
+    Configuration csConf = TestUtils.getConfigurationWithMultipleQueues(
+        new Configuration());
+    csConf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
+        YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
+
+    // inject node label manager
+    MockRM rm1 = new MockRM(csConf) {
+      @Override
+      public RMNodeLabelsManager createNodeLabelManager() {
+        return mgr;
+      }
+    };
+
+    rm1.getRMContext().setNodeLabelManager(mgr);
+    rm1.start();
+
+    // 4 NMs.
+    MockNM[] nms = new MockNM[4];
+    RMNode[] rmNodes = new RMNode[4];
+    for (int i = 0; i < 4; i++) {
+      nms[i] = rm1.registerNode("192.168.0." + i + ":1234", 10 * GB);
+      rmNodes[i] = rm1.getRMContext().getRMNodes().get(nms[i].getNodeId());
+    }
+
+    // app1 -> c
+    RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "c");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nms[0]);
+
+    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
+
+    PlacementConstraint constraint = PlacementConstraints
+        .targetNotIn("node", allocationTag("t1"))
+        .build();
+    SchedulingRequest sc = SchedulingRequest
+        .newInstance(0, Priority.newInstance(1),
+            ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED),
+            ImmutableSet.of("t1"),
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)),
+            constraint);
+    AllocateRequest request = AllocateRequest.newBuilder()
+        .schedulingRequests(ImmutableList.of(sc)).build();
+    am1.allocate(request);
+
+    for (int i = 0; i < 4; i++) {
+      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
+    }
+
+    FiCaSchedulerApp schedApp = cs.getApplicationAttempt(
+        am1.getApplicationAttemptId());
+    Assert.assertEquals(2, schedApp.getLiveContainers().size());
+
+
+    // Send another request with null placement constraint,
+    // ensure there is no NPE while handling this request.
+    sc = SchedulingRequest
+        .newInstance(1, Priority.newInstance(1),
+            ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED),
+            ImmutableSet.of("t2"),
+            ResourceSizing.newInstance(2, Resource.newInstance(1024, 1)),
+            null);
+    AllocateRequest request1 = AllocateRequest.newBuilder()
+        .schedulingRequests(ImmutableList.of(sc)).build();
+    am1.allocate(request1);
+
+    for (int i = 0; i < 4; i++) {
+      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
+    }
+
+    Assert.assertEquals(4, schedApp.getLiveContainers().size());
+
+    rm1.close();
+  }
 }




[08/50] [abbrv] hadoop git commit: YARN-8213. Add Capacity Scheduler performance metrics. (Weiwei Yang via wangda)

Posted by xy...@apache.org.
YARN-8213. Add Capacity Scheduler performance metrics. (Weiwei Yang via wangda)

Change-Id: Ieea6f3eeb83c90cd74233fea896f0fcd0f325d5f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f24c842d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f24c842d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f24c842d

Branch: refs/heads/HDDS-4
Commit: f24c842d52e166e8566337ef93c96438f1c870d8
Parents: 8605a38
Author: Wangda Tan <wa...@apache.org>
Authored: Fri May 25 21:53:20 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Fri May 25 21:53:20 2018 -0700

----------------------------------------------------------------------
 .../server/resourcemanager/ResourceManager.java |   1 +
 .../scheduler/AbstractYarnScheduler.java        |   5 +
 .../scheduler/ResourceScheduler.java            |   5 +
 .../scheduler/capacity/CapacityScheduler.java   |  31 ++++-
 .../capacity/CapacitySchedulerMetrics.java      | 119 +++++++++++++++++++
 .../TestCapacitySchedulerMetrics.java           | 110 +++++++++++++++++
 6 files changed, 269 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 05745ec..c533111 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -1216,6 +1216,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
   void reinitialize(boolean initialize) {
     ClusterMetrics.destroy();
     QueueMetrics.clearQueueMetrics();
+    getResourceScheduler().resetSchedulerMetrics();
     if (initialize) {
       resetRMContext();
       createAndInitActiveServices(true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index b2747f7..18c7b4e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -1464,4 +1464,9 @@ public abstract class AbstractYarnScheduler
       SchedulingRequest schedulingRequest, SchedulerNode schedulerNode) {
     return false;
   }
+
+  @Override
+  public void resetSchedulerMetrics() {
+    // reset scheduler metrics
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
index 5a56ac7..dcb6edd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
@@ -71,4 +71,9 @@ public interface ResourceScheduler extends YarnScheduler, Recoverable {
    */
   boolean attemptAllocationOnNode(SchedulerApplicationAttempt appAttempt,
       SchedulingRequest schedulingRequest, SchedulerNode schedulerNode);
+
+  /**
+   * Reset scheduler metrics.
+   */
+  void resetSchedulerMetrics();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 162d3bb..1c9bf6b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -1252,6 +1252,7 @@ public class CapacityScheduler extends
 
   @Override
   protected void nodeUpdate(RMNode rmNode) {
+    long begin = System.nanoTime();
     try {
       readLock.lock();
       setLastNodeUpdateTime(Time.now());
@@ -1279,6 +1280,9 @@ public class CapacityScheduler extends
         writeLock.unlock();
       }
     }
+
+    long latency = System.nanoTime() - begin;
+    CapacitySchedulerMetrics.getMetrics().addNodeUpdate(latency);
   }
 
   /**
@@ -1643,17 +1647,28 @@ public class CapacityScheduler extends
       return null;
     }
 
+    long startTime = System.nanoTime();
+
     // Backward compatible way to make sure previous behavior which allocation
     // driven by node heartbeat works.
     FiCaSchedulerNode node = CandidateNodeSetUtils.getSingleNode(candidates);
 
     // We have two different logics to handle allocation on single node / multi
     // nodes.
+    CSAssignment assignment;
     if (null != node) {
-      return allocateContainerOnSingleNode(candidates, node, withNodeHeartbeat);
+      assignment = allocateContainerOnSingleNode(candidates,
+          node, withNodeHeartbeat);
     } else{
-      return allocateContainersOnMultiNodes(candidates);
+      assignment = allocateContainersOnMultiNodes(candidates);
+    }
+
+    if (assignment != null && assignment.getAssignmentInformation() != null
+        && assignment.getAssignmentInformation().getNumAllocations() > 0) {
+      long allocateTime = System.nanoTime() - startTime;
+      CapacitySchedulerMetrics.getMetrics().addAllocate(allocateTime);
     }
+    return assignment;
   }
 
   @Override
@@ -2806,6 +2821,7 @@ public class CapacityScheduler extends
   @Override
   public boolean tryCommit(Resource cluster, ResourceCommitRequest r,
       boolean updatePending) {
+    long commitStart = System.nanoTime();
     ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request =
         (ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>) r;
 
@@ -2844,9 +2860,15 @@ public class CapacityScheduler extends
       if (app != null && attemptId.equals(app.getApplicationAttemptId())) {
         if (app.accept(cluster, request, updatePending)
             && app.apply(cluster, request, updatePending)) {
+          long commitSuccess = System.nanoTime() - commitStart;
+          CapacitySchedulerMetrics.getMetrics()
+              .addCommitSuccess(commitSuccess);
           LOG.info("Allocation proposal accepted");
           isSuccess = true;
         } else{
+          long commitFailed = System.nanoTime() - commitStart;
+          CapacitySchedulerMetrics.getMetrics()
+              .addCommitFailure(commitFailed);
           LOG.info("Failed to accept allocation proposal");
         }
 
@@ -3029,4 +3051,9 @@ public class CapacityScheduler extends
     }
     return autoCreatedLeafQueue;
   }
+
+  @Override
+  public void resetSchedulerMetrics() {
+    CapacitySchedulerMetrics.destroy();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerMetrics.java
new file mode 100644
index 0000000..5f8988b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerMetrics.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.apache.hadoop.metrics2.lib.Interns.info;
+
+/**
+ * Metrics for capacity scheduler.
+ */
+@InterfaceAudience.Private
+@Metrics(context="yarn")
+public class CapacitySchedulerMetrics {
+
+  private static AtomicBoolean isInitialized = new AtomicBoolean(false);
+
+  private static final MetricsInfo RECORD_INFO =
+      info("CapacitySchedulerMetrics",
+          "Metrics for the Yarn Capacity Scheduler");
+
+  @Metric("Scheduler allocate containers") MutableRate allocate;
+  @Metric("Scheduler commit success") MutableRate commitSuccess;
+  @Metric("Scheduler commit failure") MutableRate commitFailure;
+  @Metric("Scheduler node update") MutableRate nodeUpdate;
+
+  private static volatile CapacitySchedulerMetrics INSTANCE = null;
+  private static MetricsRegistry registry;
+
+  public static CapacitySchedulerMetrics getMetrics() {
+    if (!isInitialized.get()) {
+      synchronized (CapacitySchedulerMetrics.class) {
+        if (INSTANCE == null) {
+          INSTANCE = new CapacitySchedulerMetrics();
+          registerMetrics();
+          isInitialized.set(true);
+        }
+      }
+    }
+    return INSTANCE;
+  }
+
+  private static void registerMetrics() {
+    registry = new MetricsRegistry(RECORD_INFO);
+    registry.tag(RECORD_INFO, "ResourceManager");
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    if (ms != null) {
+      ms.register("CapacitySchedulerMetrics",
+          "Metrics for the Yarn Capacity Scheduler", INSTANCE);
+    }
+  }
+
+  @VisibleForTesting
+  public synchronized static void destroy() {
+    isInitialized.set(false);
+    INSTANCE = null;
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    if (ms != null) {
+      ms.unregisterSource("CapacitySchedulerMetrics");
+    }
+  }
+
+  public void addAllocate(long latency) {
+    this.allocate.add(latency);
+  }
+
+  public void addCommitSuccess(long latency) {
+    this.commitSuccess.add(latency);
+  }
+
+  public void addCommitFailure(long latency) {
+    this.commitFailure.add(latency);
+  }
+
+  public void addNodeUpdate(long latency) {
+    this.nodeUpdate.add(latency);
+  }
+
+  @VisibleForTesting
+  public long getNumOfNodeUpdate() {
+    return this.nodeUpdate.lastStat().numSamples();
+  }
+
+  @VisibleForTesting
+  public long getNumOfAllocates() {
+    return this.allocate.lastStat().numSamples();
+  }
+
+  @VisibleForTesting
+  public long getNumOfCommitSuccess() {
+    return this.commitSuccess.lastStat().numSamples();
+  }
+}
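
As context (not part of the commit), a minimal usage sketch of the singleton
above, following the call sites added to CapacityScheduler in this patch;
callers measure latencies in nanoseconds:

  long begin = System.nanoTime();
  // ... process a node heartbeat / allocation / commit proposal ...
  CapacitySchedulerMetrics.getMetrics().addNodeUpdate(System.nanoTime() - begin);

  // Tests can read back sample counts through the @VisibleForTesting getters:
  long updates = CapacitySchedulerMetrics.getMetrics().getNumOfNodeUpdate();

  // On RM reinitialization, resetSchedulerMetrics() invokes
  // CapacitySchedulerMetrics.destroy(), which unregisters the metrics source.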

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestCapacitySchedulerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestCapacitySchedulerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestCapacitySchedulerMetrics.java
new file mode 100644
index 0000000..eaa966a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestCapacitySchedulerMetrics.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerMetrics;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Test class for CS metrics.
+ */
+public class TestCapacitySchedulerMetrics {
+
+  private MockRM rm;
+
+  @Test
+  public void testCSMetrics() throws Exception {
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    conf.setBoolean(
+        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
+
+    RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
+    mgr.init(conf);
+    rm = new MockRM(conf) {
+      @Override
+      public RMNodeLabelsManager createNodeLabelManager() {
+        return mgr;
+      }
+    };
+
+    rm.getRMContext().setNodeLabelManager(mgr);
+    rm.start();
+
+    MockNM nm1 = rm.registerNode("host1:1234", 2048);
+    MockNM nm2 = rm.registerNode("host2:1234", 2048);
+    nm1.nodeHeartbeat(true);
+    nm2.nodeHeartbeat(true);
+
+    CapacitySchedulerMetrics csMetrics = CapacitySchedulerMetrics.getMetrics();
+    Assert.assertNotNull(csMetrics);
+    try {
+      GenericTestUtils.waitFor(()
+          -> csMetrics.getNumOfNodeUpdate() == 2, 100, 3000);
+    } catch(TimeoutException e) {
+      Assert.fail("CS metrics not updated on node-update events.");
+    }
+
+    Assert.assertEquals(0, csMetrics.getNumOfAllocates());
+    Assert.assertEquals(0, csMetrics.getNumOfCommitSuccess());
+
+    RMApp rmApp = rm.submitApp(1024, "app", "user", null, false,
+        "default", 1, null, null, false);
+    MockAM am = MockRM.launchAMWhenAsyncSchedulingEnabled(rmApp, rm);
+    am.registerAppAttempt();
+    am.allocate("*", 1024, 1, new ArrayList<>());
+
+    nm1.nodeHeartbeat(true);
+    nm2.nodeHeartbeat(true);
+
+    // Verify HB metrics updated
+    try {
+      GenericTestUtils.waitFor(()
+          -> csMetrics.getNumOfNodeUpdate() == 4, 100, 3000);
+    } catch(TimeoutException e) {
+      Assert.fail("CS metrics not updated on node-update events.");
+    }
+
+    // For async mode, the number of alloc might be bigger than 1
+    Assert.assertTrue(csMetrics.getNumOfAllocates() > 0);
+    // But there will be only 2 successful commit (1 AM + 1 task)
+    Assert.assertEquals(2, csMetrics.getNumOfCommitSuccess());
+  }
+
+  @After
+  public void tearDown() {
+    if (rm != null) {
+      rm.stop();
+    }
+  }
+}




[13/50] [abbrv] hadoop git commit: HADOOP-15449. Increase default timeout of ZK session to avoid frequent NameNode failover

Posted by xy...@apache.org.
HADOOP-15449. Increase default timeout of ZK session to avoid frequent NameNode failover

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61df174e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61df174e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61df174e

Branch: refs/heads/HDDS-4
Commit: 61df174e8b3d582183306cabfa2347c8b96322ff
Parents: 04757e5
Author: Karthik Palanisamy <ka...@gmail.com>
Authored: Mon May 28 19:41:07 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon May 28 19:41:07 2018 +0900

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/ha/ZKFailoverController.java   | 2 +-
 .../hadoop-common/src/main/resources/core-default.xml              | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61df174e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index a8c19ab..9295288 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -63,7 +63,7 @@ public abstract class ZKFailoverController {
   
   public static final String ZK_QUORUM_KEY = "ha.zookeeper.quorum";
   private static final String ZK_SESSION_TIMEOUT_KEY = "ha.zookeeper.session-timeout.ms";
-  private static final int ZK_SESSION_TIMEOUT_DEFAULT = 5*1000;
+  private static final int ZK_SESSION_TIMEOUT_DEFAULT = 10*1000;
   private static final String ZK_PARENT_ZNODE_KEY = "ha.zookeeper.parent-znode";
   public static final String ZK_ACL_KEY = "ha.zookeeper.acl";
   private static final String ZK_ACL_DEFAULT = "world:anyone:rwcda";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61df174e/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 9564587..75acf48 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2168,7 +2168,7 @@
 
 <property>
   <name>ha.zookeeper.session-timeout.ms</name>
-  <value>5000</value>
+  <value>10000</value>
   <description>
     The session timeout to use when the ZKFC connects to ZooKeeper.
     Setting this value to a lower value implies that server crashes
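
As a hedged illustration (not part of the commit): a deployment that prefers
the previous, more aggressive failover detection can still override the new
default in core-site.xml:

  <property>
    <name>ha.zookeeper.session-timeout.ms</name>
    <value>5000</value>
  </property>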




[45/50] [abbrv] hadoop git commit: Revert "Bad merge with 996a627b289947af3894bf83e7b63ec702a665cd"

Posted by xy...@apache.org.
Revert "Bad merge with 996a627b289947af3894bf83e7b63ec702a665cd"

This reverts commit 996a627b289947af3894bf83e7b63ec702a665cd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49cd77df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49cd77df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49cd77df

Branch: refs/heads/HDDS-4
Commit: 49cd77df9964a0b338480ec4dc24883e2c35e5c6
Parents: a6e7091
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue May 15 16:56:24 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu May 31 08:49:34 2018 -0700

----------------------------------------------------------------------
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 12 ++++++++++++
 1 file changed, 12 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49cd77df/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 1857fc4..9f7fc84 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -129,6 +129,18 @@
     </description>
   </property>
   <property>
+    <name>dfs.ratis.client.request.timeout.duration</name>
+    <value>3s</value>
+    <tag>OZONE, RATIS, MANAGEMENT</tag>
+    <description>The timeout duration for ratis client request.</description>
+  </property>
+  <property>
+    <name>dfs.ratis.server.request.timeout.duration</name>
+    <value>3s</value>
+    <tag>OZONE, RATIS, MANAGEMENT</tag>
+    <description>The timeout duration for ratis server request.</description>
+  </property>
+  <property>
     <name>ozone.container.report.interval</name>
     <value>60000ms</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>




[35/50] [abbrv] hadoop git commit: YARN-8377: Javadoc build failed in hadoop-yarn-server-nodemanager. Contributed by Takanobu Asanuma

Posted by xy...@apache.org.
YARN-8377: Javadoc build failed in hadoop-yarn-server-nodemanager. Contributed by Takanobu Asanuma


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e44c0849
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e44c0849
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e44c0849

Branch: refs/heads/HDDS-4
Commit: e44c0849d7982c8f1ed43af25d2092090881d19f
Parents: 3b34148
Author: Eric E Payne <er...@oath.com>
Authored: Wed May 30 16:50:19 2018 +0000
Committer: Eric E Payne <er...@oath.com>
Committed: Wed May 30 16:50:19 2018 +0000

----------------------------------------------------------------------
 .../containermanager/container/SlidingWindowRetryPolicy.java    | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e44c0849/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
index 36a8b91..9360669 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java
@@ -85,8 +85,9 @@ public class SlidingWindowRetryPolicy {
    * Updates remaining retries and the restart time when
    * required in the retryContext.
    * <p>
-   * When failuresValidityInterval is > 0, it also removes time entries from
-   * <code>restartTimes</code> which are outside the validity interval.
+   * When failuresValidityInterval is {@literal >} 0, it also removes time
+   * entries from <code>restartTimes</code> which are outside the validity
+   * interval.
    */
   protected void updateRetryContext(RetryContext retryContext) {
     if (retryContext.containerRetryContext.getFailuresValidityInterval() > 0) {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[50/50] [abbrv] hadoop git commit: HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.

Posted by xy...@apache.org.
HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a6e7091f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a6e7091f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a6e7091f

Branch: refs/heads/HDDS-4
Commit: a6e7091f5d98e63a56b2e98063d0df2fc3dd33d3
Parents: b28e646
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed May 9 15:56:03 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu May 31 08:49:34 2018 -0700

----------------------------------------------------------------------
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 12 ------------
 1 file changed, 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6e7091f/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 9f7fc84..1857fc4 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -129,18 +129,6 @@
     </description>
   </property>
   <property>
-    <name>dfs.ratis.client.request.timeout.duration</name>
-    <value>3s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for ratis client request.</description>
-  </property>
-  <property>
-    <name>dfs.ratis.server.request.timeout.duration</name>
-    <value>3s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for ratis server request.</description>
-  </property>
-  <property>
     <name>ozone.container.report.interval</name>
     <value>60000ms</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>




[24/50] [abbrv] hadoop git commit: Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.

Posted by xy...@apache.org.
Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3236a96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3236a96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3236a96

Branch: refs/heads/HDDS-4
Commit: e3236a9680709de7a95ffbc11b20e1bdc95a8605
Parents: 9502b47
Author: Kihwal Lee <ki...@apache.org>
Authored: Tue May 29 14:15:12 2018 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Tue May 29 14:15:12 2018 -0500

----------------------------------------------------------------------
 .../java/org/apache/hadoop/util/RunJar.java     | 10 +++++
 .../java/org/apache/hadoop/util/TestRunJar.java | 42 ++++++++++++++++++++
 2 files changed, 52 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3236a96/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index f1b643c..4c94dbc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -117,12 +117,17 @@ public class RunJar {
       throws IOException {
     try (JarInputStream jar = new JarInputStream(inputStream)) {
       int numOfFailedLastModifiedSet = 0;
+      String targetDirPath = toDir.getCanonicalPath() + File.separator;
       for (JarEntry entry = jar.getNextJarEntry();
            entry != null;
            entry = jar.getNextJarEntry()) {
         if (!entry.isDirectory() &&
             unpackRegex.matcher(entry.getName()).matches()) {
           File file = new File(toDir, entry.getName());
+          if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+            throw new IOException("expanding " + entry.getName()
+                + " would create file outside of " + toDir);
+          }
           ensureDirectory(file.getParentFile());
           try (OutputStream out = new FileOutputStream(file)) {
             IOUtils.copyBytes(jar, out, BUFFER_SIZE);
@@ -182,6 +187,7 @@ public class RunJar {
       throws IOException {
     try (JarFile jar = new JarFile(jarFile)) {
       int numOfFailedLastModifiedSet = 0;
+      String targetDirPath = toDir.getCanonicalPath() + File.separator;
       Enumeration<JarEntry> entries = jar.entries();
       while (entries.hasMoreElements()) {
         final JarEntry entry = entries.nextElement();
@@ -189,6 +195,10 @@ public class RunJar {
             unpackRegex.matcher(entry.getName()).matches()) {
           try (InputStream in = jar.getInputStream(entry)) {
             File file = new File(toDir, entry.getName());
+            if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+              throw new IOException("expanding " + entry.getName()
+                  + " would create file outside of " + toDir);
+            }
             ensureDirectory(file.getParentFile());
             try (OutputStream out = new FileOutputStream(file)) {
               IOUtils.copyBytes(in, out, BUFFER_SIZE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3236a96/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index ea07b97..a8c27d4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -21,6 +21,7 @@ import static org.apache.hadoop.util.RunJar.MATCH_ANY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
@@ -32,6 +33,7 @@ import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
 import java.util.Random;
 import java.util.jar.JarEntry;
 import java.util.jar.JarOutputStream;
@@ -255,4 +257,44 @@ public class TestRunJar {
     // it should not throw an exception
     verify(runJar, times(0)).unJar(any(File.class), any(File.class));
   }
+
+  @Test
+  public void testUnJar2() throws IOException {
+    // make a simple zip
+    File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_NAME);
+    JarOutputStream jstream =
+        new JarOutputStream(new FileOutputStream(jarFile));
+    JarEntry je = new JarEntry("META-INF/MANIFEST.MF");
+    byte[] data = "Manifest-Version: 1.0\nCreated-By: 1.8.0_1 (Manual)"
+        .getBytes(StandardCharsets.UTF_8);
+    je.setSize(data.length);
+    jstream.putNextEntry(je);
+    jstream.write(data);
+    jstream.closeEntry();
+    je = new JarEntry("../outside.path");
+    data = "any data here".getBytes(StandardCharsets.UTF_8);
+    je.setSize(data.length);
+    jstream.putNextEntry(je);
+    jstream.write(data);
+    jstream.closeEntry();
+    jstream.close();
+
+    File unjarDir = getUnjarDir("unjar-path");
+
+    // Unjar everything
+    try {
+      RunJar.unJar(jarFile, unjarDir, MATCH_ANY);
+      fail("unJar should throw IOException.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains(
+          "would create file outside of", e);
+    }
+    try {
+      RunJar.unJar(new FileInputStream(jarFile), unjarDir, MATCH_ANY);
+      fail("unJar should throw IOException.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains(
+          "would create file outside of", e);
+    }
+  }
 }
\ No newline at end of file
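
A self-contained sketch (hypothetical class and method names; the real check
lives inline in RunJar.unJar) of the canonical-path containment test both
hunks above add, which rejects entries such as "../outside.path":

  import java.io.File;
  import java.io.IOException;

  class PathGuard {
    /** Returns the extraction target, or throws if it escapes toDir. */
    static File safeTarget(File toDir, String entryName) throws IOException {
      String targetDirPath = toDir.getCanonicalPath() + File.separator;
      File file = new File(toDir, entryName);
      if (!file.getCanonicalPath().startsWith(targetDirPath)) {
        throw new IOException("expanding " + entryName
            + " would create file outside of " + toDir);
      }
      return file;
    }
  }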




[06/50] [abbrv] hadoop git commit: HDDS-113. Rest and Rpc Client should verify resource name using HddsClientUtils. Contributed by Lokesh Jain.

Posted by xy...@apache.org.
HDDS-113. Rest and Rpc Client should verify resource name using HddsClientUtils.
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a9652e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a9652e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a9652e6

Branch: refs/heads/HDDS-4
Commit: 2a9652e69650973f6158b60ff131215827738db6
Parents: 13d2528
Author: Anu Engineer <ae...@apache.org>
Authored: Fri May 25 15:40:46 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Fri May 25 15:45:50 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdds/scm/client/HddsClientUtils.java | 23 +++++++++
 .../apache/hadoop/ozone/client/ObjectStore.java |  9 ----
 .../apache/hadoop/ozone/client/OzoneBucket.java | 24 +--------
 .../apache/hadoop/ozone/client/OzoneVolume.java | 18 +------
 .../hadoop/ozone/client/rest/RestClient.java    | 52 ++++++++------------
 .../hadoop/ozone/client/rpc/RpcClient.java      | 46 +++++++----------
 6 files changed, 64 insertions(+), 108 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index bc5f8d6..a6813eb 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -170,6 +170,29 @@ public final class HddsClientUtils {
   }
 
   /**
+   * Verifies that each bucket / volume name is a valid DNS name.
+   *
+   * @param resourceNames Array of bucket / volume names to be verified.
+   */
+  public static void verifyResourceName(String... resourceNames) {
+    for (String resourceName : resourceNames) {
+      HddsClientUtils.verifyResourceName(resourceName);
+    }
+  }
+
+  /**
+   * Checks that the object references passed in are not null.
+   *
+   * @param references Array of object references to be checked.
+   * @param <T> type of the references to be checked.
+   */
+  public static <T> void checkNotNull(T... references) {
+    for (T ref: references) {
+      Preconditions.checkNotNull(ref);
+    }
+  }
+
+  /**
    * Returns the cache value to be used for list calls.
    * @param conf Configuration object
    * @return list cache size
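
For context (not part of the diff), a sketch of how the client code below is
expected to use the two new varargs helpers, replacing the per-call
Preconditions checks removed from ObjectStore and OzoneBucket; the argument
names here are illustrative:

  // Null-check all incoming references in one call ...
  HddsClientUtils.checkNotNull(volumeName, bucketName, keyName);
  // ... then verify that volume and bucket names are DNS-compatible.
  HddsClientUtils.verifyResourceName(volumeName, bucketName);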

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
index d8b3011..c5f0689 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
@@ -63,8 +63,6 @@ public class ObjectStore {
    * @throws IOException
    */
   public void createVolume(String volumeName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    HddsClientUtils.verifyResourceName(volumeName);
     proxy.createVolume(volumeName);
   }
 
@@ -76,9 +74,6 @@ public class ObjectStore {
    */
   public void createVolume(String volumeName, VolumeArgs volumeArgs)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(volumeArgs);
-    HddsClientUtils.verifyResourceName(volumeName);
     proxy.createVolume(volumeName, volumeArgs);
   }
 
@@ -89,8 +84,6 @@ public class ObjectStore {
    * @throws IOException
    */
   public OzoneVolume getVolume(String volumeName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    HddsClientUtils.verifyResourceName(volumeName);
     OzoneVolume volume = proxy.getVolumeDetails(volumeName);
     return volume;
   }
@@ -150,8 +143,6 @@ public class ObjectStore {
    * @throws IOException
    */
   public void deleteVolume(String volumeName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    HddsClientUtils.verifyResourceName(volumeName);
     proxy.deleteVolume(volumeName);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 5df0254..2f3cff6 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -104,6 +104,7 @@ public class OzoneBucket {
                      String volumeName, String bucketName,
                      List<OzoneAcl> acls, StorageType storageType,
                      Boolean versioning, long creationTime) {
+    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
     this.proxy = proxy;
     this.volumeName = volumeName;
     this.name = bucketName;
@@ -180,8 +181,6 @@ public class OzoneBucket {
    * @throws IOException
    */
   public void addAcls(List<OzoneAcl> addAcls) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(addAcls);
     proxy.addBucketAcls(volumeName, name, addAcls);
     addAcls.stream().filter(acl -> !acls.contains(acl)).forEach(
         acls::add);
@@ -193,8 +192,6 @@ public class OzoneBucket {
    * @throws IOException
    */
   public void removeAcls(List<OzoneAcl> removeAcls) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(removeAcls);
     proxy.removeBucketAcls(volumeName, name, removeAcls);
     acls.removeAll(removeAcls);
   }
@@ -205,8 +202,6 @@ public class OzoneBucket {
    * @throws IOException
    */
   public void setStorageType(StorageType newStorageType) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(newStorageType);
     proxy.setBucketStorageType(volumeName, name, newStorageType);
     storageType = newStorageType;
   }
@@ -217,8 +212,6 @@ public class OzoneBucket {
    * @throws IOException
    */
   public void setVersioning(Boolean newVersioning) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(newVersioning);
     proxy.setBucketVersioning(volumeName, name, newVersioning);
     versioning = newVersioning;
   }
@@ -233,8 +226,6 @@ public class OzoneBucket {
    */
   public OzoneOutputStream createKey(String key, long size)
       throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
     return createKey(key, size, defaultReplicationType, defaultReplication);
   }
 
@@ -251,10 +242,6 @@ public class OzoneBucket {
                                      ReplicationType type,
                                      ReplicationFactor factor)
       throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
-    Preconditions.checkNotNull(type);
-    Preconditions.checkNotNull(factor);
     return proxy.createKey(volumeName, name, key, size, type, factor);
   }
 
@@ -265,8 +252,6 @@ public class OzoneBucket {
    * @throws IOException
    */
   public OzoneInputStream readKey(String key) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
     return proxy.getKey(volumeName, name, key);
   }
 
@@ -277,8 +262,6 @@ public class OzoneBucket {
    * @throws IOException
    */
   public OzoneKey getKey(String key) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
     return proxy.getKeyDetails(volumeName, name, key);
   }
 
@@ -314,16 +297,11 @@ public class OzoneBucket {
    * @throws IOException
    */
   public void deleteKey(String key) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
     proxy.deleteKey(volumeName, name, key);
   }
 
   public void renameKey(String fromKeyName, String toKeyName)
       throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(fromKeyName);
-    Preconditions.checkNotNull(toKeyName);
     proxy.renameKey(volumeName, name, fromKeyName, toKeyName);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
index 4601f1a..77f882a 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
@@ -83,6 +83,7 @@ public class OzoneVolume {
   public OzoneVolume(Configuration conf, ClientProtocol proxy, String name,
                      String admin, String owner, long quotaInBytes,
                      long creationTime, List<OzoneAcl> acls) {
+    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
     this.proxy = proxy;
     this.name = name;
     this.admin = admin;
@@ -153,8 +154,6 @@ public class OzoneVolume {
    * @throws IOException
    */
   public void setOwner(String owner) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(owner);
     proxy.setVolumeOwner(name, owner);
     this.owner = owner;
   }
@@ -165,8 +164,6 @@ public class OzoneVolume {
    * @throws IOException
    */
   public void setQuota(OzoneQuota  quota) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(quota);
     proxy.setVolumeQuota(name, quota);
     this.quotaInBytes = quota.sizeInBytes();
   }
@@ -178,9 +175,6 @@ public class OzoneVolume {
    */
   public void createBucket(String bucketName)
       throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(bucketName);
-    HddsClientUtils.verifyResourceName(bucketName);
     proxy.createBucket(name, bucketName);
   }
 
@@ -192,10 +186,6 @@ public class OzoneVolume {
    */
   public void createBucket(String bucketName, BucketArgs bucketArgs)
       throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(bucketName);
-    Preconditions.checkNotNull(bucketArgs);
-    HddsClientUtils.verifyResourceName(bucketName);
     proxy.createBucket(name, bucketName, bucketArgs);
   }
 
@@ -206,9 +196,6 @@ public class OzoneVolume {
    * @throws IOException
    */
   public OzoneBucket getBucket(String bucketName) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(bucketName);
-    HddsClientUtils.verifyResourceName(bucketName);
     OzoneBucket bucket = proxy.getBucketDetails(name, bucketName);
     return bucket;
   }
@@ -246,9 +233,6 @@ public class OzoneVolume {
    * @throws IOException
    */
   public void deleteBucket(String bucketName) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(bucketName);
-    HddsClientUtils.verifyResourceName(bucketName);
     proxy.deleteBucket(name, bucketName);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
index ac71abe..1169820 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
@@ -211,7 +211,8 @@ public class RestClient implements ClientProtocol {
   public void createVolume(String volumeName, VolumeArgs volArgs)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
+      HddsClientUtils.verifyResourceName(volumeName);
+      Preconditions.checkNotNull(volArgs);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       String owner = volArgs.getOwner() == null ?
           ugi.getUserName() : volArgs.getOwner();
@@ -256,7 +257,7 @@ public class RestClient implements ClientProtocol {
   public void setVolumeOwner(String volumeName, String owner)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
+      HddsClientUtils.verifyResourceName(volumeName);
       Preconditions.checkNotNull(owner);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName);
@@ -273,7 +274,7 @@ public class RestClient implements ClientProtocol {
   public void setVolumeQuota(String volumeName, OzoneQuota quota)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
+      HddsClientUtils.verifyResourceName(volumeName);
       Preconditions.checkNotNull(quota);
       String quotaString = quota.toString();
       URIBuilder builder = new URIBuilder(ozoneRestUri);
@@ -291,7 +292,7 @@ public class RestClient implements ClientProtocol {
   public OzoneVolume getVolumeDetails(String volumeName)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
+      HddsClientUtils.verifyResourceName(volumeName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName);
       builder.setParameter(Header.OZONE_INFO_QUERY_TAG,
@@ -326,7 +327,7 @@ public class RestClient implements ClientProtocol {
   @Override
   public void deleteVolume(String volumeName) throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
+      HddsClientUtils.verifyResourceName(volumeName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName);
       HttpDelete httpDelete = new HttpDelete(builder.build());
@@ -362,8 +363,7 @@ public class RestClient implements ClientProtocol {
       String volumeName, String bucketName, BucketArgs bucketArgs)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(bucketArgs);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       OzoneConsts.Versioning versioning = OzoneConsts.Versioning.DISABLED;
@@ -404,8 +404,7 @@ public class RestClient implements ClientProtocol {
       String volumeName, String bucketName, List<OzoneAcl> addAcls)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(addAcls);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
 
@@ -429,8 +428,7 @@ public class RestClient implements ClientProtocol {
       String volumeName, String bucketName, List<OzoneAcl> removeAcls)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(removeAcls);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
 
@@ -454,8 +452,7 @@ public class RestClient implements ClientProtocol {
       String volumeName, String bucketName, Boolean versioning)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(versioning);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
 
@@ -477,8 +474,7 @@ public class RestClient implements ClientProtocol {
       String volumeName, String bucketName, StorageType storageType)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(storageType);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
 
@@ -498,8 +494,7 @@ public class RestClient implements ClientProtocol {
   public void deleteBucket(String volumeName, String bucketName)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName +
           PATH_SEPARATOR + bucketName);
@@ -521,8 +516,7 @@ public class RestClient implements ClientProtocol {
   public OzoneBucket getBucketDetails(String volumeName, String bucketName)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName +
           PATH_SEPARATOR + bucketName);
@@ -573,9 +567,8 @@ public class RestClient implements ClientProtocol {
     // TODO: Once ReplicationType and ReplicationFactor are supported in
     // OzoneHandler (in Datanode), set them in header.
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(keyName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
+      HddsClientUtils.checkNotNull(keyName, type, factor);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName +
           PATH_SEPARATOR + bucketName +
@@ -617,8 +610,7 @@ public class RestClient implements ClientProtocol {
       String volumeName, String bucketName, String keyName)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(keyName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName +
@@ -661,8 +653,7 @@ public class RestClient implements ClientProtocol {
   public void deleteKey(String volumeName, String bucketName, String keyName)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(keyName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName +
@@ -679,10 +670,8 @@ public class RestClient implements ClientProtocol {
   public void renameKey(String volumeName, String bucketName,
       String fromKeyName, String toKeyName) throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(fromKeyName);
-      Preconditions.checkNotNull(toKeyName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
+      HddsClientUtils.checkNotNull(fromKeyName, toKeyName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName + PATH_SEPARATOR + bucketName
           + PATH_SEPARATOR + fromKeyName);
@@ -708,8 +697,7 @@ public class RestClient implements ClientProtocol {
       String volumeName, String bucketName, String keyName)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(keyName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index ffe93dd..43b94a1 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -22,6 +22,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -170,7 +171,7 @@ public class RpcClient implements ClientProtocol {
   @Override
   public void createVolume(String volumeName, VolumeArgs volArgs)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
+    HddsClientUtils.verifyResourceName(volumeName);
     Preconditions.checkNotNull(volArgs);
 
     String admin = volArgs.getAdmin() == null ?
@@ -214,7 +215,7 @@ public class RpcClient implements ClientProtocol {
   @Override
   public void setVolumeOwner(String volumeName, String owner)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
+    HddsClientUtils.verifyResourceName(volumeName);
     Preconditions.checkNotNull(owner);
     keySpaceManagerClient.setOwner(volumeName, owner);
   }
@@ -222,7 +223,7 @@ public class RpcClient implements ClientProtocol {
   @Override
   public void setVolumeQuota(String volumeName, OzoneQuota quota)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
+    HddsClientUtils.verifyResourceName(volumeName);
     Preconditions.checkNotNull(quota);
     long quotaInBytes = quota.sizeInBytes();
     keySpaceManagerClient.setQuota(volumeName, quotaInBytes);
@@ -231,7 +232,7 @@ public class RpcClient implements ClientProtocol {
   @Override
   public OzoneVolume getVolumeDetails(String volumeName)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
+    HddsClientUtils.verifyResourceName(volumeName);
     KsmVolumeArgs volume = keySpaceManagerClient.getVolumeInfo(volumeName);
     return new OzoneVolume(
         conf,
@@ -253,7 +254,7 @@ public class RpcClient implements ClientProtocol {
 
   @Override
   public void deleteVolume(String volumeName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
+    HddsClientUtils.verifyResourceName(volumeName);
     keySpaceManagerClient.deleteVolume(volumeName);
   }
 
@@ -307,8 +308,7 @@ public class RpcClient implements ClientProtocol {
   public void createBucket(
       String volumeName, String bucketName, BucketArgs bucketArgs)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(bucketArgs);
 
     Boolean isVersionEnabled = bucketArgs.getVersioning() == null ?
@@ -346,8 +346,7 @@ public class RpcClient implements ClientProtocol {
   public void addBucketAcls(
       String volumeName, String bucketName, List<OzoneAcl> addAcls)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(addAcls);
     KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
@@ -360,8 +359,7 @@ public class RpcClient implements ClientProtocol {
   public void removeBucketAcls(
       String volumeName, String bucketName, List<OzoneAcl> removeAcls)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(removeAcls);
     KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
@@ -374,8 +372,7 @@ public class RpcClient implements ClientProtocol {
   public void setBucketVersioning(
       String volumeName, String bucketName, Boolean versioning)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(versioning);
     KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
@@ -388,8 +385,7 @@ public class RpcClient implements ClientProtocol {
   public void setBucketStorageType(
       String volumeName, String bucketName, StorageType storageType)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(storageType);
     KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
@@ -401,8 +397,7 @@ public class RpcClient implements ClientProtocol {
   @Override
   public void deleteBucket(
       String volumeName, String bucketName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     keySpaceManagerClient.deleteBucket(volumeName, bucketName);
   }
 
@@ -415,8 +410,7 @@ public class RpcClient implements ClientProtocol {
   @Override
   public OzoneBucket getBucketDetails(
       String volumeName, String bucketName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     KsmBucketInfo bucketArgs =
         keySpaceManagerClient.getBucketInfo(volumeName, bucketName);
     return new OzoneBucket(
@@ -454,6 +448,8 @@ public class RpcClient implements ClientProtocol {
       String volumeName, String bucketName, String keyName, long size,
       ReplicationType type, ReplicationFactor factor)
       throws IOException {
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
+    HddsClientUtils.checkNotNull(keyName, type, factor);
     String requestId = UUID.randomUUID().toString();
     KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
         .setVolumeName(volumeName)
@@ -486,8 +482,7 @@ public class RpcClient implements ClientProtocol {
   public OzoneInputStream getKey(
       String volumeName, String bucketName, String keyName)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(keyName);
     String requestId = UUID.randomUUID().toString();
     KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
@@ -508,8 +503,7 @@ public class RpcClient implements ClientProtocol {
   public void deleteKey(
       String volumeName, String bucketName, String keyName)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(keyName);
     KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
         .setVolumeName(volumeName)
@@ -522,10 +516,8 @@ public class RpcClient implements ClientProtocol {
   @Override
   public void renameKey(String volumeName, String bucketName,
       String fromKeyName, String toKeyName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
-    Preconditions.checkNotNull(fromKeyName);
-    Preconditions.checkNotNull(toKeyName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
+    HddsClientUtils.checkNotNull(fromKeyName, toKeyName);
     KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)




[23/50] [abbrv] hadoop git commit: HDDS-125. Cleanup HDDS CheckStyle issues. Contributed by Anu Engineer.

Posted by xy...@apache.org.
HDDS-125. Cleanup HDDS CheckStyle issues.
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9502b47b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9502b47b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9502b47b

Branch: refs/heads/HDDS-4
Commit: 9502b47bd2a3cf32edae635293169883c2914475
Parents: 17aa40f
Author: Anu Engineer <ae...@apache.org>
Authored: Tue May 29 09:54:06 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue May 29 09:54:06 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |  1 -
 .../hdds/scm/block/DeletedBlockLogImpl.java     |  2 +-
 .../hdds/scm/container/ContainerMapping.java    |  6 +-
 .../scm/container/ContainerStateManager.java    | 24 +++----
 .../hadoop/hdds/scm/container/Mapping.java      |  9 ++-
 .../hdds/scm/node/SCMNodeStorageStatMXBean.java |  4 +-
 .../hdds/scm/node/SCMNodeStorageStatMap.java    | 19 +++---
 .../hdds/scm/node/StorageReportResult.java      |  8 +--
 .../hdds/scm/node/states/Node2ContainerMap.java |  2 +-
 .../hdds/scm/pipelines/PipelineSelector.java    |  5 +-
 .../scm/server/StorageContainerManager.java     |  3 +-
 .../TestStorageContainerManagerHttpServer.java  |  1 -
 .../hadoop/hdds/scm/block/package-info.java     | 23 +++++++
 .../scm/container/TestContainerMapping.java     | 12 ++--
 .../hdds/scm/container/closer/package-info.java | 22 +++++++
 .../hadoop/hdds/scm/container/package-info.java | 22 +++++++
 .../hdds/scm/container/states/package-info.java | 22 +++++++
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 66 ++++++++++----------
 .../scm/node/TestSCMNodeStorageStatMap.java     | 32 +++++-----
 .../hadoop/hdds/scm/node/package-info.java      | 22 +++++++
 .../ozone/container/common/TestEndPoint.java    |  2 -
 .../ozone/container/common/package-info.java    | 22 +++++++
 .../ozone/container/placement/package-info.java | 22 +++++++
 .../replication/TestContainerSupervisor.java    |  7 ++-
 24 files changed, 263 insertions(+), 95 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 5a98e85..d17d6c0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -41,7 +41,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index cabcb46..cedc506 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -190,7 +190,7 @@ public class DeletedBlockLogImpl implements DeletedBlockLog {
     try {
       for(Long txID : txIDs) {
         try {
-          byte [] deleteBlockBytes =
+          byte[] deleteBlockBytes =
               deletedStore.get(Longs.toByteArray(txID));
           if (deleteBlockBytes == null) {
             LOG.warn("Delete txID {} not found", txID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index e569874..2d88621 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -152,7 +152,8 @@ public class ContainerMapping implements Mapping {
     ContainerInfo containerInfo;
     lock.lock();
     try {
-      byte[] containerBytes = containerStore.get(Longs.toByteArray(containerID));
+      byte[] containerBytes = containerStore.get(
+          Longs.toByteArray(containerID));
       if (containerBytes == null) {
         throw new SCMException(
             "Specified key does not exist. key : " + containerID,
@@ -229,7 +230,8 @@ public class ContainerMapping implements Mapping {
           containerStateManager.allocateContainer(
               pipelineSelector, type, replicationFactor, owner);
 
-      byte[] containerIDBytes = Longs.toByteArray(containerInfo.getContainerID());
+      byte[] containerIDBytes = Longs.toByteArray(
+          containerInfo.getContainerID());
       containerStore.put(containerIDBytes, containerInfo.getProtobuf()
               .toByteArray());
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index f11a50c..4895b78 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -230,18 +230,18 @@ public class ContainerStateManager implements Closeable {
    *
    * Container State Flow:
    *
-   * [ALLOCATED]------->[CREATING]--------->[OPEN]---------->[CLOSING]------->[CLOSED]
-   *            (CREATE)     |    (CREATED)       (FINALIZE)          (CLOSE)    |
-   *                         |                                                   |
-   *                         |                                                   |
-   *                         |(TIMEOUT)                                  (DELETE)|
-   *                         |                                                   |
-   *                         +------------------> [DELETING] <-------------------+
-   *                                                   |
-   *                                                   |
-   *                                          (CLEANUP)|
-   *                                                   |
-   *                                               [DELETED]
+   * [ALLOCATED]---->[CREATING]------>[OPEN]-------->[CLOSING]------->[CLOSED]
+   *            (CREATE)     |    (CREATED)       (FINALIZE)     (CLOSE)    |
+   *                         |                                              |
+   *                         |                                              |
+   *                         |(TIMEOUT)                             (DELETE)|
+   *                         |                                              |
+   *                         +-------------> [DELETING] <-------------------+
+   *                                            |
+   *                                            |
+   *                                   (CLEANUP)|
+   *                                            |
+   *                                        [DELETED]
    */
   private void initializeStateMachine() {
     stateMachine.addTransition(LifeCycleState.ALLOCATED,

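As a reading aid for the diagram above, a hedged sketch of the transitions initializeStateMachine() is expected to register. The hunk is truncated after the first call, so the addTransition(from, to, event) shape is inferred from the visible line, and the event names follow the diagram's labels:

    stateMachine.addTransition(LifeCycleState.ALLOCATED,
        LifeCycleState.CREATING, LifeCycleEvent.CREATE);
    stateMachine.addTransition(LifeCycleState.CREATING,
        LifeCycleState.OPEN, LifeCycleEvent.CREATED);
    stateMachine.addTransition(LifeCycleState.OPEN,
        LifeCycleState.CLOSING, LifeCycleEvent.FINALIZE);
    stateMachine.addTransition(LifeCycleState.CLOSING,
        LifeCycleState.CLOSED, LifeCycleEvent.CLOSE);
    stateMachine.addTransition(LifeCycleState.CREATING,
        LifeCycleState.DELETING, LifeCycleEvent.TIMEOUT);
    stateMachine.addTransition(LifeCycleState.CLOSED,
        LifeCycleState.DELETING, LifeCycleEvent.DELETE);
    stateMachine.addTransition(LifeCycleState.DELETING,
        LifeCycleState.DELETED, LifeCycleEvent.CLEANUP);
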
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
index 61dee2b..f560174 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
@@ -45,7 +45,8 @@ public interface Mapping extends Closeable {
    * The max size of the searching range cannot exceed the
    * value of count.
    *
-   * @param startContainerID start containerID, >=0, start searching at the head if 0.
+   * @param startContainerID start containerID, >=0,
+   * start searching at the head if 0.
    * @param count count must be >= 0
    *              Usually the count will be replace with a very big
    *              value instead of being unlimited in case the db is very big.
@@ -53,7 +54,8 @@ public interface Mapping extends Closeable {
    * @return a list of container.
    * @throws IOException
    */
-  List<ContainerInfo> listContainer(long startContainerID, int count) throws IOException;
+  List<ContainerInfo> listContainer(long startContainerID, int count)
+      throws IOException;
 
   /**
    * Allocates a new container for a given keyName and replication factor.
@@ -64,7 +66,8 @@ public interface Mapping extends Closeable {
    * @throws IOException
    */
   ContainerInfo allocateContainer(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor replicationFactor, String owner) throws IOException;
+      HddsProtos.ReplicationFactor replicationFactor, String owner)
+      throws IOException;
 
   /**
    * Deletes a container from SCM.

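A hedged pagination sketch against the reflowed listContainer contract above. The consumer code is hypothetical; it assumes the next page starts at the last seen containerID plus one, per the javadoc's startContainerID semantics:

    long start = 0;                                // 0: start at the head
    List<ContainerInfo> batch;
    do {
      batch = mapping.listContainer(start, 100);   // at most 100 per call
      for (ContainerInfo info : batch) {
        // ... handle info ...
        start = info.getContainerID() + 1;         // resume after this one
      }
    } while (!batch.isEmpty());
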
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
index d81ff0f..32ecbad 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
@@ -31,7 +31,7 @@ import java.util.UUID;
 @InterfaceAudience.Private
 public interface SCMNodeStorageStatMXBean {
   /**
-   * Get the capacity of the dataNode
+   * Get the capacity of the dataNode.
    * @param datanodeID Datanode Id
    * @return long
    */
@@ -52,7 +52,7 @@ public interface SCMNodeStorageStatMXBean {
   long getUsedSpace(UUID datanodeId);
 
   /**
-   * Returns the total capacity of all dataNodes
+   * Returns the total capacity of all dataNodes.
    * @return long
    */
   long getTotalCapacity();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
index f8ad2af..fa423bb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -56,7 +56,7 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
   // NodeStorageInfo MXBean
   private ObjectName scmNodeStorageInfoBean;
   /**
-   * constructs the scmNodeStorageReportMap object
+   * constructs the scmNodeStorageReportMap object.
    */
   public SCMNodeStorageStatMap(OzoneConfiguration conf) {
     // scmNodeStorageReportMap = new ConcurrentHashMap<>();
@@ -73,6 +73,9 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
             HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT);
   }
 
+  /**
+   * Enum that describes what we should do at various thresholds.
+   */
   public enum UtilizationThreshold {
     NORMAL, WARN, CRITICAL;
   }
@@ -107,8 +110,8 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
    * @param datanodeID -- Datanode UUID
    * @param report - set if StorageReports.
    */
-  public void insertNewDatanode(UUID datanodeID, Set<StorageLocationReport> report)
-      throws SCMException {
+  public void insertNewDatanode(UUID datanodeID,
+      Set<StorageLocationReport> report) throws SCMException {
     Preconditions.checkNotNull(report);
     Preconditions.checkState(report.size() != 0);
     Preconditions.checkNotNull(datanodeID);
@@ -142,8 +145,8 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
    * @throws SCMException - if we don't know about this datanode, for new DN
    *                      use insertNewDatanode.
    */
-  public void updateDatanodeMap(UUID datanodeID, Set<StorageLocationReport> report)
-      throws SCMException {
+  public void updateDatanodeMap(UUID datanodeID,
+      Set<StorageLocationReport> report) throws SCMException {
     Preconditions.checkNotNull(datanodeID);
     Preconditions.checkNotNull(report);
     Preconditions.checkState(report.size() != 0);
@@ -301,7 +304,7 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
   }
 
   /**
-   * removes the dataNode from scmNodeStorageReportMap
+   * removes the dataNode from scmNodeStorageReportMap.
    * @param datanodeID
    * @throws SCMException in case the dataNode is not found in the map.
    */
@@ -339,11 +342,11 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
   }
 
   /**
-   * get the scmUsed ratio
+   * get the scmUsed ratio.
    */
   public  double getScmUsedratio(long scmUsed, long capacity) {
     double scmUsedRatio =
-        truncateDecimals (scmUsed / (double) capacity);
+        truncateDecimals(scmUsed / (double) capacity);
     return scmUsedRatio;
   }
   /**

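A quick worked check of getScmUsedratio as reformatted above, with illustrative values; truncateDecimals only trims trailing decimal places, so the ratio itself is plain division:

    // 2 GB used out of 10 GB capacity:
    double ratio = map.getScmUsedratio(2L * OzoneConsts.GB, 10L * OzoneConsts.GB);
    // ratio == 0.2
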
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
index 3436e77..0b63ceb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
@@ -69,14 +69,14 @@ public class StorageReportResult {
     }
 
     public ReportResultBuilder setFullVolumeSet(
-        Set<StorageLocationReport> fullVolumes) {
-      this.fullVolumes = fullVolumes;
+        Set<StorageLocationReport> fullVolumesSet) {
+      this.fullVolumes = fullVolumesSet;
       return this;
     }
 
     public ReportResultBuilder setFailedVolumeSet(
-        Set<StorageLocationReport> failedVolumes) {
-      this.failedVolumes = failedVolumes;
+        Set<StorageLocationReport> failedVolumesSet) {
+      this.failedVolumes = failedVolumesSet;
       return this;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
index f850e7a..1960604 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
@@ -92,7 +92,7 @@ public class Node2ContainerMap {
   }
 
   /**
-   * Removes datanode Entry from the map
+   * Removes datanode Entry from the map.
    * @param datanodeID - Datanode ID.
    */
   public void removeDatanode(UUID datanodeID) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
index d29bb84..2e56043 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
@@ -170,8 +170,9 @@ public class PipelineSelector {
       throws IOException {
     PipelineManager manager = getPipelineManager(replicationType);
     Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
-    LOG.debug("Getting replication pipeline forReplicationType {} : ReplicationFactor {}",
-        replicationType.toString(), replicationFactor.toString());
+    LOG.debug("Getting replication pipeline forReplicationType {} :" +
+            " ReplicationFactor {}", replicationType.toString(),
+        replicationFactor.toString());
     return manager.
         getPipeline(replicationFactor, replicationType);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 0fd6843..78f13cb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.util.MBeans;
@@ -87,7 +86,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
  * create a container, which then can be used to store data.
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
-public class StorageContainerManager extends ServiceRuntimeInfoImpl
+public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     implements SCMMXBean {
 
   private static final Logger LOG = LoggerFactory

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
index 0dbb7c1..d9e1425 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
@@ -39,7 +39,6 @@ import org.junit.runners.Parameterized.Parameters;
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.Arrays;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
new file mode 100644
index 0000000..a67df69
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/**
+ * Make checkstyle happy.
+ * */
+package org.apache.hadoop.hdds.scm.block;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
index a27068bb..f318316 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
@@ -216,8 +216,10 @@ public class TestContainerMapping {
 
     mapping.processContainerReports(crBuilder.build());
 
-    ContainerInfo updatedContainer = mapping.getContainer(info.getContainerID());
-    Assert.assertEquals(100000000L, updatedContainer.getNumberOfKeys());
+    ContainerInfo updatedContainer =
+        mapping.getContainer(info.getContainerID());
+    Assert.assertEquals(100000000L,
+        updatedContainer.getNumberOfKeys());
     Assert.assertEquals(2000000000L, updatedContainer.getUsedBytes());
   }
 
@@ -251,8 +253,10 @@ public class TestContainerMapping {
 
     mapping.processContainerReports(crBuilder.build());
 
-    ContainerInfo updatedContainer = mapping.getContainer(info.getContainerID());
-    Assert.assertEquals(500000000L, updatedContainer.getNumberOfKeys());
+    ContainerInfo updatedContainer =
+        mapping.getContainer(info.getContainerID());
+    Assert.assertEquals(500000000L,
+        updatedContainer.getNumberOfKeys());
     Assert.assertEquals(5368705120L, updatedContainer.getUsedBytes());
     NavigableSet<ContainerID> pendingCloseContainers = mapping.getStateManager()
         .getMatchingContainerIDs(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
new file mode 100644
index 0000000..2f35719
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle happy.
+ */
+package org.apache.hadoop.hdds.scm.container.closer;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java
new file mode 100644
index 0000000..f93aea6
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.hdds.scm.container;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
new file mode 100644
index 0000000..795dfc1
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.hdds.scm.container.states;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index 36e796f..de87e50 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -510,42 +510,42 @@ public class TestNodeManager {
    * @throws InterruptedException
    * @throws TimeoutException
    */
+  /**
+   * These values are very important. Here is what they mean so you don't
+   * have to look it up while reading this code.
+   *
+   *  OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - This is the frequency of the
+   *  HB processing thread that is running in the SCM. This thread must run
+   *  for the SCM to process the Heartbeats.
+   *
+   *  OZONE_SCM_HEARTBEAT_INTERVAL - This is the frequency at which
+   *  datanodes will send heartbeats to SCM. Please note: This is the only
+   *  config value for node manager that is specified in seconds. We don't
+   *  want SCM heartbeat resolution to be finer than seconds.
+   *  In this test it is not used, but we are forced to set it because we
+   *  have validation code that checks that the Stale Node interval and the
+   *  Dead Node interval are larger than the value of
+   *  OZONE_SCM_HEARTBEAT_INTERVAL.
+   *
+   *  OZONE_SCM_STALENODE_INTERVAL - This is the time that must elapse
+   *  from the last heartbeat for us to mark a node as stale. In this test
+   *  we set that to 3. That is, if a node has not heartbeated SCM in the
+   *  last 3 seconds, we will mark it as stale.
+   *
+   *  OZONE_SCM_DEADNODE_INTERVAL - This is the time that must elapse
+   *  from the last heartbeat for a node to be marked dead. We have an
+   *  additional constraint that this must be at least 2 times bigger than
+   *  the Stale Node interval.
+   *
+   *  With these we are trying to explore the state of this cluster with
+   *  various timeouts. Each section is commented so that you can keep
+   *  track of the state of the cluster nodes.
+   *
+   */
+
   @Test
   public void testScmClusterIsInExpectedState1() throws IOException,
       InterruptedException, TimeoutException {
-    /**
-     * These values are very important. Here is what it means so you don't
-     * have to look it up while reading this code.
-     *
-     *  OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - This the frequency of the
-     *  HB processing thread that is running in the SCM. This thread must run
-     *  for the SCM  to process the Heartbeats.
-     *
-     *  OZONE_SCM_HEARTBEAT_INTERVAL - This is the frequency at which
-     *  datanodes will send heartbeats to SCM. Please note: This is the only
-     *  config value for node manager that is specified in seconds. We don't
-     *  want SCM heartbeat resolution to be more than in seconds.
-     *  In this test it is not used, but we are forced to set it because we
-     *  have validation code that checks Stale Node interval and Dead Node
-     *  interval is larger than the value of
-     *  OZONE_SCM_HEARTBEAT_INTERVAL.
-     *
-     *  OZONE_SCM_STALENODE_INTERVAL - This is the time that must elapse
-     *  from the last heartbeat for us to mark a node as stale. In this test
-     *  we set that to 3. That is if a node has not heartbeat SCM for last 3
-     *  seconds we will mark it as stale.
-     *
-     *  OZONE_SCM_DEADNODE_INTERVAL - This is the time that must elapse
-     *  from the last heartbeat for a node to be marked dead. We have an
-     *  additional constraint that this must be at least 2 times bigger than
-     *  Stale node Interval.
-     *
-     *  With these we are trying to explore the state of this cluster with
-     *  various timeouts. Each section is commented so that you can keep
-     *  track of the state of the cluster nodes.
-     *
-     */
-
     OzoneConfiguration conf = getConf();
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
         MILLISECONDS);
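
As a quick reader's aid (not part of the patch): the four intervals the
relocated comment documents can be wired together roughly as below. This
is a minimal sketch; the 1s/3s/6s values are illustrative, following the
comment's own example, and it assumes only Configuration.setTimeDuration
and the static imports (config key constants, time units) already used by
TestNodeManager.

    OzoneConfiguration conf = getConf();
    // SCM-side heartbeat processing thread; must tick for HBs to be handled.
    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, MILLISECONDS);
    // Datanode-to-SCM heartbeat frequency (seconds granularity by design).
    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
    // No heartbeat for 3 seconds: the node is marked stale.
    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
    // Must be at least 2x the stale interval; after 6 seconds the node is dead.
    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);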

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
index 571de77..b824412 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
@@ -42,11 +42,14 @@ import java.util.HashSet;
 import java.io.IOException;
 import java.util.concurrent.ConcurrentHashMap;
 
+/**
+ * Test Node Storage Map.
+ */
 public class TestSCMNodeStorageStatMap {
   private final static int DATANODE_COUNT = 100;
-  final long capacity = 10L * OzoneConsts.GB;
-  final long used = 2L * OzoneConsts.GB;
-  final long remaining = capacity - used;
+  private final long capacity = 10L * OzoneConsts.GB;
+  private final long used = 2L * OzoneConsts.GB;
+  private final long remaining = capacity - used;
   private static OzoneConfiguration conf = new OzoneConfiguration();
   private final Map<UUID, Set<StorageLocationReport>> testData =
       new ConcurrentHashMap<>();
@@ -59,9 +62,10 @@ public class TestSCMNodeStorageStatMap {
       UUID dnId = UUID.randomUUID();
       Set<StorageLocationReport> reportSet = new HashSet<>();
       String path = GenericTestUtils.getTempPath(
-          TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + Integer
-              .toString(dnIndex));
-      StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
+          TestSCMNodeStorageStatMap.class.getSimpleName() + "-" +
+              Integer.toString(dnIndex));
+      StorageLocationReport.Builder builder =
+          StorageLocationReport.newBuilder();
       builder.setStorageType(StorageType.DISK).setId(dnId.toString())
           .setStorageLocation(path).setScmUsed(used).setRemaining(remaining)
           .setCapacity(capacity).setFailed(false);
@@ -139,12 +143,12 @@ public class TestSCMNodeStorageStatMap {
     String path =
         GenericTestUtils.getRandomizedTempPath().concat("/" + storageId);
     StorageLocationReport report = reportSet.iterator().next();
-    long capacity = report.getCapacity();
-    long used = report.getScmUsed();
-    long remaining = report.getRemaining();
+    long reportCapacity = report.getCapacity();
+    long reportScmUsed = report.getScmUsed();
+    long reportRemaining = report.getRemaining();
     List<SCMStorageReport> reports = TestUtils
-        .createStorageReport(capacity, used, remaining, path, null, storageId,
-            1);
+        .createStorageReport(reportCapacity, reportScmUsed, reportRemaining,
+            path, null, storageId, 1);
     StorageReportResult result =
         map.processNodeReport(key, TestUtils.createNodeReport(reports));
     Assert.assertEquals(result.getStatus(),
@@ -158,7 +162,7 @@ public class TestSCMNodeStorageStatMap {
         SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
 
     reportList.add(TestUtils
-        .createStorageReport(capacity, capacity, 0, path, null,
+        .createStorageReport(reportCapacity, reportCapacity, 0, path, null,
             UUID.randomUUID().toString(), 1).get(0));
     result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
     Assert.assertEquals(result.getStatus(),
@@ -166,8 +170,8 @@ public class TestSCMNodeStorageStatMap {
     // Mark a disk failed 
     SCMStorageReport srb2 = SCMStorageReport.newBuilder()
         .setStorageUuid(UUID.randomUUID().toString())
-        .setStorageLocation(srb.getStorageLocation()).setScmUsed(capacity)
-        .setCapacity(capacity).setRemaining(0).setFailed(true).build();
+        .setStorageLocation(srb.getStorageLocation()).setScmUsed(reportCapacity)
+        .setCapacity(reportCapacity).setRemaining(0).setFailed(true).build();
     reportList.add(srb2);
     nrb.addAllStorageReport(reportList);
     result = map.processNodeReport(key, nrb.addStorageReport(srb).build());
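
The variable renames in this hunk (capacity -> reportCapacity, and so on)
fix field shadowing: the test class declares capacity, used and remaining
as private fields above, so same-named locals hide them and trip
checkstyle's HiddenField check. A hypothetical sketch of the pattern being
fixed (the Example class is invented for illustration; getCapacity mirrors
the StorageLocationReport call above):

    class Example {
      private final long capacity = 10L;

      void before(StorageLocationReport report) {
        long capacity = report.getCapacity();       // shadows the field: HiddenField
      }

      void after(StorageLocationReport report) {
        long reportCapacity = report.getCapacity(); // distinct name, no shadowing
      }
    }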

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
new file mode 100644
index 0000000..dfd8397
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.hdds.scm.node;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index e82dc98..1d92cdc 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -33,8 +33,6 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
-import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMStorageReport;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
new file mode 100644
index 0000000..da2ae84
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.ozone.container.common;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
new file mode 100644
index 0000000..ddd751c
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.ozone.container.placement;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
index 01f70b1..e197886 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
@@ -202,8 +202,8 @@ public class TestContainerSupervisor {
       ppool.handleContainerReport(reportsProto);
     }
 
-    clist = datanodeStateManager.getContainerReport(wayOverReplicatedContainerID,
-        ppool.getPool().getPoolName(), 7);
+    clist = datanodeStateManager.getContainerReport(
+        wayOverReplicatedContainerID, ppool.getPool().getPoolName(), 7);
 
     for (ContainerReportsRequestProto reportsProto : clist) {
       ppool.handleContainerReport(reportsProto);
@@ -264,7 +264,8 @@ public class TestContainerSupervisor {
               "PoolNew", 1);
       containerSupervisor.handleContainerReport(clist.get(0));
       GenericTestUtils.waitFor(() ->
-          inProgressLog.getOutput().contains(Long.toString(newContainerID)) && inProgressLog
+          inProgressLog.getOutput()
+              .contains(Long.toString(newContainerID)) && inProgressLog
               .getOutput().contains(id.getUuidString()),
           200, 10 * 1000);
     } finally {




[47/50] [abbrv] hadoop git commit: HDDS-70. Fix config names for secure ksm and scm. Contributed by Ajay Kumar.

Posted by xy...@apache.org.
HDDS-70. Fix config names for secure ksm and scm. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46edc0d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46edc0d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46edc0d2

Branch: refs/heads/HDDS-4
Commit: 46edc0d2f4d7fe7175c30ab2dfa4c3ffc2087382
Parents: 5d68690
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue May 22 13:32:28 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu May 31 08:49:34 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |  4 --
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   | 14 ++---
 .../scm/protocol/ScmBlockLocationProtocol.java  |  2 +-
 .../StorageContainerLocationProtocol.java       |  3 +-
 .../protocolPB/ScmBlockLocationProtocolPB.java  |  4 +-
 .../StorageContainerLocationProtocolPB.java     |  2 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java    |  8 +--
 .../common/src/main/resources/ozone-default.xml | 54 ++++++--------------
 .../StorageContainerDatanodeProtocol.java       |  2 +-
 .../StorageContainerDatanodeProtocolPB.java     |  2 +-
 .../scm/server/StorageContainerManager.java     | 12 ++---
 .../StorageContainerManagerHttpServer.java      |  4 +-
 .../compose/compose-secure/docker-compose.yaml  |  6 +--
 .../test/compose/compose-secure/docker-config   | 12 ++---
 .../acceptance/ozone-secure.robot               | 12 ++---
 .../ozone/client/protocol/ClientProtocol.java   |  2 +-
 .../apache/hadoop/ozone/ksm/KSMConfigKeys.java  | 10 ++--
 .../ksm/protocol/KeySpaceManagerProtocol.java   |  4 +-
 .../protocolPB/KeySpaceManagerProtocolPB.java   |  3 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java    | 32 ++++++------
 .../hadoop/ozone/ksm/KeySpaceManager.java       | 13 ++---
 .../ozone/ksm/KeySpaceManagerHttpServer.java    |  4 +-
 22 files changed, 89 insertions(+), 120 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index a12d6ac..dec2c1c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -20,8 +20,4 @@ package org.apache.hadoop.hdds;
 public final class HddsConfigKeys {
   private HddsConfigKeys() {
   }
-  public static final String HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY = "hdds.ksm."
-      + "kerberos.keytab.file";
-  public static final String HDDS_KSM_KERBEROS_PRINCIPAL_KEY = "hdds.ksm"
-      + ".kerberos.principal";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index ba8f310..7929a08 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -132,9 +132,9 @@ public final class ScmConfigKeys {
       "ozone.scm.http-address";
   public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
       "ozone.scm.https-address";
-  public static final String OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY =
-      "ozone.scm.kerberos.keytab.file";
-  public static final String OZONE_SCM_KERBEROS_PRINCIPAL_KEY = "ozone.scm.kerberos.principal";
+  public static final String HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY =
+      "hdds.scm.kerberos.keytab.file";
+  public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY = "hdds.scm.kerberos.principal";
   public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
   public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
   public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
@@ -281,10 +281,10 @@ public final class ScmConfigKeys {
       "ozone.scm.container.close.threshold";
   public static final float OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
 
-  public static final String SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY =
-      "ozone.scm.web.authentication.kerberos.principal";
-  public static final String SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY =
-      "ozone.scm.web.authentication.kerberos.keytab";
+  public static final String HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY =
+      "hdds.scm.web.authentication.kerberos.principal";
+  public static final String HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY =
+      "hdds.scm.web.authentication.kerberos.keytab";
   /**
    * Never constructed.
    */
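
To make the rename concrete: call sites move from the old ozone.scm.*
constants to the new hdds.scm.* ones, as the StorageContainerManager hunk
later in this patch shows. A minimal sketch of reading the renamed keys
(assuming only OzoneConfiguration.get and the constants from this hunk):

    OzoneConfiguration conf = new OzoneConfiguration();
    // Resolves "hdds.scm.kerberos.principal" (was "ozone.scm.kerberos.principal").
    String principal = conf.get(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY);
    // Resolves "hdds.scm.kerberos.keytab.file" (was "ozone.scm.kerberos.keytab.file").
    String keytab = conf.get(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY);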

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
index e17f1c2..2d46ae0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
@@ -33,7 +33,7 @@ import java.util.List;
  * ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes
  * to read/write a block.
  */
-@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface ScmBlockLocationProtocol {
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index d36bdf3..13545fb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.hdds.scm.protocol;
 
-import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
@@ -35,7 +34,7 @@ import org.apache.hadoop.security.KerberosInfo;
  * ContainerLocationProtocol is used by an HDFS node to find the set of nodes
  * that currently host a container.
  */
-@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface StorageContainerLocationProtocol {
   /**
    * Asks SCM where a container should be allocated. SCM responds with the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
index 89bb066..06bbd05 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
@@ -18,11 +18,9 @@
 package org.apache.hadoop.hdds.scm.protocolPB;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
     .ScmBlockLocationProtocolService;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.security.KerberosInfo;
 
@@ -35,7 +33,7 @@ import org.apache.hadoop.security.KerberosInfo;
     protocolVersion = 1)
 @InterfaceAudience.Private
 @KerberosInfo(
-    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface ScmBlockLocationProtocolPB
     extends ScmBlockLocationProtocolService.BlockingInterface {
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
index 3bd83f9..f80ba20 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.security.KerberosInfo;
     "org.apache.hadoop.ozone.protocol.StorageContainerLocationProtocol",
     protocolVersion = 1)
 @KerberosInfo(
-    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface StorageContainerLocationProtocolPB
     extends StorageContainerLocationProtocolService.BlockingInterface {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index ac5d864..b8f7a29 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -238,9 +238,6 @@ public final class OzoneConfigKeys {
       DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
       ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT;
 
-  public static final String OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
-      "ozone.web.authentication.kerberos.principal";
-
   public static final String HDDS_DATANODE_PLUGINS_KEY =
       "hdds.datanode.plugins";
 
@@ -259,6 +256,11 @@ public final class OzoneConfigKeys {
   public static final String OZONE_SYSTEM_TAGS_KEY = "ozone.system.tags";
   public static final boolean OZONE_SECURITY_ENABLED_DEFAULT = false;
 
+  public static final String OZONE_OM_KERBEROS_KEYTAB_FILE_KEY = "ozone.om."
+      + "kerberos.keytab.file";
+  public static final String OZONE_OM_KERBEROS_PRINCIPAL_KEY = "ozone.om"
+      + ".kerberos.principal";
+
   /**
    * There is no need to instantiate this class.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 9f7fc84..42496c4 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -345,14 +345,6 @@
     </description>
   </property>
   <property>
-    <name>ozone.ksm.keytab.file</name>
-    <value/>
-    <tag>KSM, SECURITY</tag>
-    <description>
-      The keytab file for Kerberos authentication in KSM.
-    </description>
-  </property>
-  <property>
     <name>ozone.ksm.db.cache.size.mb</name>
     <value>128</value>
     <tag>KSM, PERFORMANCE</tag>
@@ -853,20 +845,6 @@
       the logs. Very useful when debugging REST protocol.
     </description>
   </property>
-  <property>
-    <name>ozone.web.authentication.kerberos.principal</name>
-    <value/>
-    <tag>OZONE, SECURITY</tag>
-    <description>
-      The server principal used by the SCM and KSM for web UI SPNEGO
-      authentication when Kerberos security is enabled. This is typically set to
-      HTTP/_HOST@REALM.TLD The SPNEGO server principal begins with the prefix
-      HTTP/ by convention.
-
-      If the value is '*', the web server will attempt to login with
-      every principal specified in the keytab file.
-    </description>
-  </property>
 
   <!--Client Settings-->
   <property>
@@ -902,7 +880,7 @@
   </property>
 
   <property>
-    <name>ozone.scm.container.creation.lease.timeout</name>
+    <name>hdds.scm.container.creation.lease.timeout</name>
     <value>60s</value>
     <tag>OZONE, SCM</tag>
     <description>
@@ -956,7 +934,7 @@
     </description>
   </property>
   <property>
-    <name>ozone.scm.container.close.threshold</name>
+    <name>hdds.scm.container.close.threshold</name>
     <value>0.9f</value>
     <tag>OZONE, SCM</tag>
     <description>
@@ -1087,58 +1065,58 @@
   </property>
 
   <property>
-    <name>ozone.scm.kerberos.keytab.file</name>
+    <name>hdds.scm.kerberos.keytab.file</name>
     <value></value>
     <tag> OZONE, SECURITY</tag>
     <description> The keytab file used by each SCM daemon to login as its
       service principal. The principal name is configured with
-      ozone.scm.kerberos.principal.
+      hdds.scm.kerberos.principal.
     </description>
   </property>
   <property>
-    <name>ozone.scm.kerberos.principal</name>
+    <name>hdds.scm.kerberos.principal</name>
     <value></value>
     <tag> OZONE, SECURITY</tag>
     <description>The SCM service principal. Ex scm/_HOST@REALM.COM</description>
   </property>
 
   <property>
-    <name>hdds.ksm.kerberos.keytab.file</name>
+    <name>ozone.om.kerberos.keytab.file</name>
     <value></value>
     <tag> HDDS, SECURITY</tag>
-    <description> The keytab file used by KSM daemon to login as its
+    <description> The keytab file used by OzoneManager daemon to login as its
       service principal. The principal name is configured with
-      hdds.ksm.kerberos.principal.
+      ozone.om.kerberos.principal.
     </description>
   </property>
   <property>
-    <name>hdds.ksm.kerberos.principal</name>
+    <name>ozone.om.kerberos.principal</name>
     <value></value>
     <tag> HDDS, SECURITY</tag>
-    <description>The KSM service principal. Ex ksm/_HOST@REALM.COM</description>
+    <description>The OzoneManager service principal. Ex om/_HOST@REALM.COM</description>
   </property>
 
   <property>
-    <name>ozone.scm.web.authentication.kerberos.principal</name>
+    <name>hdds.scm.web.authentication.kerberos.principal</name>
     <value>HTTP/_HOST@EXAMPLE.COM</value>
   </property>
   <property>
-    <name>ozone.scm.web.authentication.kerberos.keytab</name>
+    <name>hdds.scm.web.authentication.kerberos.keytab</name>
     <value>/etc/security/keytabs/HTTP.keytab</value>
   </property>
 
   <property>
-    <name>hdds.ksm.web.authentication.kerberos.principal</name>
+    <name>ozone.om.web.authentication.kerberos.principal</name>
     <value>HTTP/_HOST@EXAMPLE.COM</value>
     <description>
-      KSM http server kerberos principal.
+      OzoneManager http server kerberos principal.
     </description>
   </property>
   <property>
-    <name>hdds.ksm.web.authentication.kerberos.keytab</name>
+    <name>ozone.om.web.authentication.kerberos.keytab</name>
     <value>/etc/security/keytabs/HTTP.keytab</value>
     <description>
-      KSM http server kerberos keytab.
+      OzoneManager http server kerberos keytab.
     </description>
   </property>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
index 5b04c56..9f18d96 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.security.KerberosInfo;
  * Protoc file that defines this protocol.
  */
 @KerberosInfo(
-    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface StorageContainerDatanodeProtocol {
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
index 9c32ef8..9006e91 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.security.KerberosInfo;
     "org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol",
     protocolVersion = 1)
 @KerberosInfo(
-    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
     clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
 public interface StorageContainerDatanodeProtocolPB extends
     StorageContainerDatanodeProtocolService.BlockingInterface {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 65619a4..88217e5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -77,8 +77,8 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_M
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
 /**
@@ -209,16 +209,16 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
       throws IOException, AuthenticationException {
     LOG.debug("Ozone security is enabled. Attempting login for SCM user. "
             + "Principal: {}, keytab: {}", this.scmConf.get
-            (OZONE_SCM_KERBEROS_PRINCIPAL_KEY),
-        this.scmConf.get(OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY));
+            (HDDS_SCM_KERBEROS_PRINCIPAL_KEY),
+        this.scmConf.get(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY));
 
     if (SecurityUtil.getAuthenticationMethod(conf).equals
         (AuthenticationMethod.KERBEROS)) {
       UserGroupInformation.setConfiguration(this.scmConf);
       InetSocketAddress socAddr = HddsServerUtil
           .getScmBlockClientBindAddress(conf);
-      SecurityUtil.login(conf, OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
-          OZONE_SCM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
+      SecurityUtil.login(conf, HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
+          HDDS_SCM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
     } else {
       throw new AuthenticationException(SecurityUtil.getAuthenticationMethod
           (conf) + " authentication method not support. "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
index da936ad..41dd89a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
@@ -62,11 +62,11 @@ public class StorageContainerManagerHttpServer extends BaseHttpServer {
   }
 
   @Override protected String getKeytabFile() {
-    return ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY;
+    return ScmConfigKeys.HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY;
   }
 
   @Override protected String getSpnegoPrincipal() {
-    return ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
+    return ScmConfigKeys.HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
   }
 
   @Override protected String getEnabledKey() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
index 2661163..db211bc 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
@@ -40,15 +40,15 @@ services:
       env_file:
         - ./docker-config
       command: ["/opt/hadoop/bin/ozone","datanode"]
-   ksm:
+   om:
       image: ahadoop/ozone:v1
-      hostname: ksm
+      hostname: om
       volumes:
          - ${OZONEDIR}:/opt/hadoop
       ports:
          - 9874:9874
       environment:
-         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+         ENSURE_KSM_INITIALIZED: /data/metadata/om/current/VERSION
       env_file:
           - ./docker-config
       command: ["/opt/hadoop/bin/ozone","ksm"]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
index 678c75a..360b69a 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-OZONE-SITE.XML_ozone.ksm.address=ksm
+OZONE-SITE.XML_ozone.ksm.address=om
 OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.enabled=True
 OZONE-SITE.XML_hdds.scm.datanode.id=/data/datanode.id
@@ -25,13 +25,13 @@ OZONE-SITE.XML_hdds.scm.client.address=scm
 OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
 OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM
 OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab
-OZONE-SITE.XML_ozone.ksm.kerberos.principal=ksm/ksm@EXAMPLE.COM
-OZONE-SITE.XML_ozone.ksm.kerberos.keytab.file=/etc/security/keytabs/ksm.keytab
+OZONE-SITE.XML_ozone.om.kerberos.principal=om/om@EXAMPLE.COM
+OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab
 OZONE-SITE.XML_ozone.security.enabled=true
 OZONE-SITE.XML_hdds.scm.web.authentication.kerberos.principal=HTTP/scm@EXAMPLE.COM
 OZONE-SITE.XML_hdds.scm.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
-OZONE-SITE.XML_ozone.ksm.web.authentication.kerberos.principal=HTTP/ksm@EXAMPLE.COM
-OZONE-SITE.XML_ozone.ksm.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+OZONE-SITE.XML_ozone.om.web.authentication.kerberos.principal=HTTP/om@EXAMPLE.COM
+OZONE-SITE.XML_ozone.om.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
 OZONE-SITE.XML_ozone.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.scm.client.address=scm
 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
@@ -57,7 +57,7 @@ LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH
 
 OZONE_DATANODE_SECURE_USER=root
 CONF_DIR=/etc/security/keytabs
-KERBEROS_KEYTABS=dn nn ksm scm HTTP testuser
+KERBEROS_KEYTABS=dn nn om scm HTTP testuser
 KERBEROS_KEYSTORES=hadoop
 KERBEROS_SERVER=ozone.kdc
 JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
index 4a78980..7fc1088 100644
--- a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
+++ b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
@@ -26,7 +26,7 @@ ${version}
 *** Test Cases ***
 
 Daemons are running
-    Is daemon running           ksm
+    Is daemon running           om
     Is daemon running           scm
     Is daemon running           datanode
     Is daemon running           ozone.kdc
@@ -45,15 +45,15 @@ Test rest interface
                     Should contain      ${result}       200 OK
 
 Test ozone cli
-    ${result} =     Execute on      1   datanode        ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root
+    ${result} =     Execute on      1   datanode        ozone oz -createVolume o3://om/hive -user bilbo -quota 100TB -root
                     Should contain      ${result}       Client cannot authenticate via
                     # Authenticate testuser
                     Execute on      0   datanode        kinit -k testuser/datanode@EXAMPLE.COM -t /etc/security/keytabs/testuser.keytab
-                    Execute on      0   datanode        ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root
-    ${result} =     Execute on      0   datanode        ozone oz -listVolume o3://ksm/ -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
+                    Execute on      0   datanode        ozone oz -createVolume o3://om/hive -user bilbo -quota 100TB -root
+    ${result} =     Execute on      0   datanode        ozone oz -listVolume o3://om/ -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
                     Should contain      ${result}       createdOn
-                    Execute on      0   datanode        ozone oz -updateVolume o3://ksm/hive -user bill -quota 10TB
-    ${result} =     Execute on      0   datanode        ozone oz -infoVolume o3://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
+                    Execute on      0   datanode        ozone oz -updateVolume o3://om/hive -user bill -quota 10TB
+    ${result} =     Execute on      0   datanode        ozone oz -infoVolume o3://om/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
                     Should Be Equal     ${result}       bill
 
 *** Keywords ***

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 80b0a40..ee5dca9 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.security.KerberosInfo;
  * includes: {@link org.apache.hadoop.ozone.client.rpc.RpcClient} for RPC and
  * {@link  org.apache.hadoop.ozone.client.rest.RestClient} for REST.
  */
-@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface ClientProtocol {
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
index d911bcb..cc25dbe 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
@@ -49,8 +49,6 @@ public final class KSMConfigKeys {
       "ozone.ksm.http-address";
   public static final String OZONE_KSM_HTTPS_ADDRESS_KEY =
       "ozone.ksm.https-address";
-  public static final String OZONE_KSM_KEYTAB_FILE =
-      "ozone.ksm.keytab.file";
   public static final String OZONE_KSM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
   public static final int OZONE_KSM_HTTP_BIND_PORT_DEFAULT = 9874;
   public static final int OZONE_KSM_HTTPS_BIND_PORT_DEFAULT = 9875;
@@ -79,8 +77,8 @@ public final class KSMConfigKeys {
       "ozone.key.deleting.limit.per.task";
   public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
 
-  public static final String KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
-      "hdds.ksm.web.authentication.kerberos.principal";
-  public static final String KSM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE =
-      "hdds.ksm.web.authentication.kerberos.keytab";
+  public static final String OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
+      "ozone.om.web.authentication.kerberos.principal";
+  public static final String OZONE_OM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE =
+      "ozone.om.web.authentication.kerberos.keytab";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
index de27108..21c36fa 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.ozone.ksm.protocol;
 
-import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
 import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
@@ -36,7 +36,7 @@ import org.apache.hadoop.security.KerberosInfo;
  * Protocol to talk to KSM.
  */
 @KerberosInfo(
-    serverPrincipal = HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = OzoneConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY)
 public interface KeySpaceManagerProtocol {
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
index 71b9da0..84fe154 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.ksm.protocolPB;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.protocol.proto
@@ -32,7 +31,7 @@ import org.apache.hadoop.security.KerberosInfo;
     "org.apache.hadoop.ozone.protocol.KeySpaceManagerProtocol",
     protocolVersion = 1)
 @KerberosInfo(
-    serverPrincipal = HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = OzoneConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface KeySpaceManagerProtocolPB
     extends KeySpaceManagerService.BlockingInterface {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index b917dfe..cc97576 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -29,9 +29,7 @@ import java.util.UUID;
 import java.util.concurrent.Callable;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmInfo;
@@ -120,12 +118,12 @@ public final class TestSecureOzoneCluster {
   private void createCredentialsInKDC(Configuration conf, MiniKdc miniKdc)
       throws Exception {
     createPrincipal(scmKeytab,
-        conf.get(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY));
+        conf.get(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY));
     createPrincipal(spnegoKeytab,
-        conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY),
-        conf.get(KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL));
+        conf.get(ScmConfigKeys.HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY),
+        conf.get(KSMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL));
     createPrincipal(ksmKeyTab,
-        conf.get(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY));
+        conf.get(OzoneConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY));
   }
 
   private void createPrincipal(File keytab, String... principal)
@@ -155,25 +153,25 @@ public final class TestSecureOzoneCluster {
         "kerberos");
     conf.set(OZONE_ADMINISTRATORS, curUser);
 
-    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
         "scm/" + host + "@" + realm);
-    conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+    conf.set(ScmConfigKeys.HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
         "HTTP_SCM/" + host + "@" + realm);
 
-    conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY,
+    conf.set(OzoneConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
         "ksm/" + host + "@" + realm);
-    conf.set(KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL,
+    conf.set(KSMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL,
         "HTTP_KSM/" + host + "@" + realm);
 
     scmKeytab = new File(workDir, "scm.keytab");
     spnegoKeytab = new File(workDir, "http.keytab");
     ksmKeyTab = new File(workDir, "ksm.keytab");
 
-    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
         scmKeytab.getAbsolutePath());
-    conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
+    conf.set(ScmConfigKeys.HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
         spnegoKeytab.getAbsolutePath());
-    conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY,
+    conf.set(OzoneConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
         ksmKeyTab.getAbsolutePath());
 
   }
@@ -206,7 +204,7 @@ public final class TestSecureOzoneCluster {
   @Test
   public void testSecureScmStartupFailure() throws Exception {
     initSCM();
-    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
+    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
         "kerberos");
 
@@ -216,9 +214,9 @@ public final class TestSecureOzoneCluster {
           StorageContainerManager.createSCM(null, conf);
         });
 
-    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
         "scm/_HOST@EXAMPLE.com");
-    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
         "/etc/security/keytabs/scm.keytab");
 
     testCommonKerberosFailures(
@@ -261,7 +259,7 @@ public final class TestSecureOzoneCluster {
     ksmStore.setScmId("testScmId");
     // writes the version file properties
     ksmStore.initialize();
-    conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY,
+    conf.set(OzoneConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
         "non-existent-user@EXAMPLE.com");
     testCommonKerberosFailures(() -> KeySpaceManager.createKSM(null, conf));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
index be747d2..3e5a1e8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
@@ -88,8 +88,8 @@ import java.util.List;
 import java.util.Map;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
     .OZONE_KSM_ADDRESS_KEY;
 import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
@@ -213,14 +213,15 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
     if (SecurityUtil.getAuthenticationMethod(conf).equals
         (AuthenticationMethod.KERBEROS)) {
       LOG.debug("Ozone security is enabled. Attempting login for KSM user. "
-              + "Principal: {},keytab: {}", conf.get(HDDS_KSM_KERBEROS_PRINCIPAL_KEY),
-          conf.get(HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY));
+              + "Principal: {},keytab: {}", conf.get(
+          OZONE_OM_KERBEROS_PRINCIPAL_KEY),
+          conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY));
 
       UserGroupInformation.setConfiguration(conf);
 
       InetSocketAddress socAddr = getKsmAddress(conf);
-      SecurityUtil.login(conf, HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY,
-          HDDS_KSM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
+      SecurityUtil.login(conf, OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
+          OZONE_OM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
     } else {
       throw new AuthenticationException(SecurityUtil.getAuthenticationMethod
           (conf) + " authentication method not supported. KSM user login "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edc0d2/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
index a0d15b3..9848840 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
@@ -64,11 +64,11 @@ public class KeySpaceManagerHttpServer extends BaseHttpServer {
   }
 
   @Override protected String getKeytabFile() {
-    return KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE;
+    return KSMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE;
   }
 
   @Override protected String getSpnegoPrincipal() {
-    return KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
+    return KSMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
   }
 
   @Override protected String getEnabledKey() {




[09/50] [abbrv] hadoop git commit: HDDS-78. Add per volume level storage stats in SCM. Contributed by Shashikant Banerjee.

Posted by xy...@apache.org.
HDDS-78. Add per volume level storage stats in SCM.
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cf6e87f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cf6e87f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cf6e87f

Branch: refs/heads/HDDS-4
Commit: 0cf6e87f9212af10eae39cdcb1fe60e6d8191772
Parents: f24c842
Author: Anu Engineer <ae...@apache.org>
Authored: Sat May 26 11:06:22 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Sat May 26 11:11:14 2018 -0700

----------------------------------------------------------------------
 .../placement/metrics/SCMNodeStat.java          |  21 --
 .../hdds/scm/node/SCMNodeStorageStatMXBean.java |   8 +
 .../hdds/scm/node/SCMNodeStorageStatMap.java    | 230 +++++++++++++------
 .../hdds/scm/node/StorageReportResult.java      |  87 +++++++
 .../scm/node/TestSCMNodeStorageStatMap.java     | 141 +++++++++---
 5 files changed, 356 insertions(+), 131 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
index 4fe72fc..3c871d3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
@@ -136,25 +136,4 @@ public class SCMNodeStat implements NodeStat {
   public int hashCode() {
     return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get());
   }
-
-
-  /**
-   * Truncate to 4 digits since uncontrolled precision is some times
-   * counter intuitive to what users expect.
-   * @param value - double.
-   * @return double.
-   */
-  private double truncateDecimals(double value) {
-    final int multiplier = 10000;
-    return (double) ((long) (value * multiplier)) / multiplier;
-  }
-
-  /**
-   * get the scmUsed ratio
-   */
-  public  double getScmUsedratio() {
-    double scmUsedRatio =
-        truncateDecimals(getScmUsed().get() / (double) getCapacity().get());
-    return scmUsedRatio;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
index f17a970..d81ff0f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.hdds.scm.node;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
 
+import java.util.Set;
 import java.util.UUID;
 
 /**
@@ -66,4 +68,10 @@ public interface SCMNodeStorageStatMXBean {
    * @return long
    */
   long getTotalFreeSpace();
+
+  /**
+   * Returns the set of disks for a given Datanode.
+   * @return set of storage volumes
+   */
+  Set<StorageLocationReport> getStorageVolumes(UUID datanodeId);
 }
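
The MXBean now exposes the raw per-volume reports rather than one aggregated
stat. A small consumption sketch (statMap and datanodeId are assumed to be in
scope):

    // Recompute one datanode's total capacity from its volume reports.
    long totalCapacity = 0;
    for (StorageLocationReport volume : statMap.getStorageVolumes(datanodeId)) {
      totalCapacity += volume.getCapacity();
    }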

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
index 25cb357..f8ad2af 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -22,18 +22,18 @@ package org.apache.hadoop.hdds.scm.node;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.protocol.proto.
+    StorageContainerDatanodeProtocolProtos.SCMStorageReport;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.management.ObjectName;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Collectors;
 
@@ -52,16 +52,15 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
   private final double warningUtilizationThreshold;
   private final double criticalUtilizationThreshold;
 
-  private final Map<UUID, SCMNodeStat> scmNodeStorageStatMap;
+  private final Map<UUID, Set<StorageLocationReport>> scmNodeStorageReportMap;
   // NodeStorageInfo MXBean
   private ObjectName scmNodeStorageInfoBean;
-  // Aggregated node stats
-  private SCMNodeStat clusterStat;
   /**
-   * constructs the scmNodeStorageStatMap object
+   * constructs the scmNodeStorageReportMap object
    */
   public SCMNodeStorageStatMap(OzoneConfiguration conf) {
-    scmNodeStorageStatMap = new ConcurrentHashMap<>();
+    scmNodeStorageReportMap = new ConcurrentHashMap<>();
     warningUtilizationThreshold = conf.getDouble(
         OzoneConfigKeys.
             HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD,
@@ -72,7 +71,6 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
             HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD,
         OzoneConfigKeys.
             HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT);
-    clusterStat = new SCMNodeStat();
   }
 
   public enum UtilizationThreshold {
@@ -81,20 +79,22 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
 
   /**
    * Returns true if this a datanode that is already tracked by
-   * scmNodeStorageStatMap.
+   * scmNodeStorageReportMap.
    *
    * @param datanodeID - UUID of the Datanode.
    * @return True if this is tracked, false if this map does not know about it.
    */
   public boolean isKnownDatanode(UUID datanodeID) {
     Preconditions.checkNotNull(datanodeID);
-    return scmNodeStorageStatMap.containsKey(datanodeID);
+    return scmNodeStorageReportMap.containsKey(datanodeID);
   }
 
   public List<UUID> getDatanodeList(
       UtilizationThreshold threshold) {
-    return scmNodeStorageStatMap.entrySet().stream()
-        .filter(entry -> (isThresholdReached(threshold, entry.getValue())))
+    return scmNodeStorageReportMap.entrySet().stream().filter(
+        entry -> (isThresholdReached(threshold,
+            getScmUsedratio(getUsedSpace(entry.getKey()),
+                getCapacity(entry.getKey())))))
         .map(Map.Entry::getKey)
         .collect(Collectors.toList());
   }
@@ -105,19 +105,19 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
    * Insert a new datanode into Node2Container Map.
    *
    * @param datanodeID -- Datanode UUID
-   * @param stat - scmNode stat for the Datanode.
+   * @param report - set of StorageReports for the Datanode.
    */
-  public void insertNewDatanode(UUID datanodeID, SCMNodeStat stat)
+  public void insertNewDatanode(UUID datanodeID, Set<StorageLocationReport> report)
       throws SCMException {
-    Preconditions.checkNotNull(stat);
+    Preconditions.checkNotNull(report);
+    Preconditions.checkState(report.size() != 0);
     Preconditions.checkNotNull(datanodeID);
-    synchronized (scmNodeStorageStatMap) {
+    synchronized (scmNodeStorageReportMap) {
       if (isKnownDatanode(datanodeID)) {
         throw new SCMException("Node already exists in the map",
             DUPLICATE_DATANODE);
       }
-      scmNodeStorageStatMap.put(datanodeID, stat);
-      clusterStat.add(stat);
+      scmNodeStorageReportMap.putIfAbsent(datanodeID, report);
     }
   }
 
@@ -138,72 +138,103 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
    * Updates the Container list of an existing DN.
    *
    * @param datanodeID - UUID of DN.
-   * @param stat - scmNode stat for the Datanode.
+   * @param report - set of Storage Reports for the Datanode.
    * @throws SCMException - if we don't know about this datanode, for new DN
    *                      use insertNewDatanode.
    */
-  public void updateDatanodeMap(UUID datanodeID, SCMNodeStat stat)
+  public void updateDatanodeMap(UUID datanodeID, Set<StorageLocationReport> report)
       throws SCMException {
     Preconditions.checkNotNull(datanodeID);
-    Preconditions.checkNotNull(stat);
-    synchronized (scmNodeStorageStatMap) {
-      if (!scmNodeStorageStatMap.containsKey(datanodeID)) {
+    Preconditions.checkNotNull(report);
+    Preconditions.checkState(report.size() != 0);
+    synchronized (scmNodeStorageReportMap) {
+      if (!scmNodeStorageReportMap.containsKey(datanodeID)) {
         throw new SCMException("No such datanode", NO_SUCH_DATANODE);
       }
-      SCMNodeStat removed = scmNodeStorageStatMap.get(datanodeID);
-      clusterStat.subtract(removed);
-      scmNodeStorageStatMap.put(datanodeID, stat);
-      clusterStat.add(stat);
+      scmNodeStorageReportMap.put(datanodeID, report);
     }
   }
 
-  public NodeReportStatus processNodeReport(UUID datanodeID,
+  public StorageReportResult processNodeReport(UUID datanodeID,
       StorageContainerDatanodeProtocolProtos.SCMNodeReport nodeReport)
-      throws SCMException {
+      throws IOException {
     Preconditions.checkNotNull(datanodeID);
     Preconditions.checkNotNull(nodeReport);
+
     long totalCapacity = 0;
     long totalRemaining = 0;
     long totalScmUsed = 0;
-    List<StorageContainerDatanodeProtocolProtos.SCMStorageReport>
+    Set<StorageLocationReport> storageReportSet = new HashSet<>();
+    Set<StorageLocationReport> fullVolumeSet = new HashSet<>();
+    Set<StorageLocationReport> failedVolumeSet = new HashSet<>();
+    List<SCMStorageReport>
         storageReports = nodeReport.getStorageReportList();
-    for (StorageContainerDatanodeProtocolProtos.SCMStorageReport report : storageReports) {
+    for (SCMStorageReport report : storageReports) {
+      StorageLocationReport storageReport =
+          StorageLocationReport.getFromProtobuf(report);
+      storageReportSet.add(storageReport);
+      if (report.hasFailed() && report.getFailed()) {
+        failedVolumeSet.add(storageReport);
+      } else if (isThresholdReached(UtilizationThreshold.CRITICAL,
+          getScmUsedratio(report.getScmUsed(), report.getCapacity()))) {
+        fullVolumeSet.add(storageReport);
+      }
       totalCapacity += report.getCapacity();
       totalRemaining += report.getRemaining();
       totalScmUsed += report.getScmUsed();
     }
-    SCMNodeStat stat = scmNodeStorageStatMap.get(datanodeID);
-    if (stat == null) {
-      stat = new SCMNodeStat();
-      stat.set(totalCapacity, totalScmUsed, totalRemaining);
-      insertNewDatanode(datanodeID, stat);
+
+    if (!isKnownDatanode(datanodeID)) {
+      insertNewDatanode(datanodeID, storageReportSet);
     } else {
-      stat.set(totalCapacity, totalScmUsed, totalRemaining);
-      updateDatanodeMap(datanodeID, stat);
+      updateDatanodeMap(datanodeID, storageReportSet);
     }
-    if (isThresholdReached(UtilizationThreshold.CRITICAL, stat)) {
+    if (isThresholdReached(UtilizationThreshold.CRITICAL,
+        getScmUsedratio(totalScmUsed, totalCapacity))) {
       LOG.warn("Datanode {} is out of storage space. Capacity: {}, Used: {}",
-          datanodeID, stat.getCapacity().get(), stat.getScmUsed().get());
-      return NodeReportStatus.DATANODE_OUT_OF_SPACE;
-    } else {
-      if (isThresholdReached(UtilizationThreshold.WARN, stat)) {
-       LOG.warn("Datanode {} is low on storage space. Capacity: {}, Used: {}",
-           datanodeID, stat.getCapacity().get(), stat.getScmUsed().get());
-      }
-      return NodeReportStatus.ALL_IS_WELL;
+          datanodeID, totalCapacity, totalScmUsed);
+      return StorageReportResult.ReportResultBuilder.newBuilder()
+          .setStatus(ReportStatus.DATANODE_OUT_OF_SPACE)
+          .setFullVolumeSet(fullVolumeSet).setFailedVolumeSet(failedVolumeSet)
+          .build();
+    }
+    if (isThresholdReached(UtilizationThreshold.WARN,
+        getScmUsedratio(totalScmUsed, totalCapacity))) {
+      LOG.warn("Datanode {} is low on storage space. Capacity: {}, Used: {}",
+          datanodeID, totalCapacity, totalScmUsed);
     }
+
+    if (failedVolumeSet.isEmpty() && !fullVolumeSet.isEmpty()) {
+      return StorageReportResult.ReportResultBuilder.newBuilder()
+          .setStatus(ReportStatus.STORAGE_OUT_OF_SPACE)
+          .setFullVolumeSet(fullVolumeSet).build();
+    }
+
+    if (!failedVolumeSet.isEmpty() && fullVolumeSet.isEmpty()) {
+      return StorageReportResult.ReportResultBuilder.newBuilder()
+          .setStatus(ReportStatus.FAILED_STORAGE)
+          .setFailedVolumeSet(failedVolumeSet).build();
+    }
+    if (!failedVolumeSet.isEmpty() && !fullVolumeSet.isEmpty()) {
+      return StorageReportResult.ReportResultBuilder.newBuilder()
+          .setStatus(ReportStatus.FAILED_AND_OUT_OF_SPACE_STORAGE)
+          .setFailedVolumeSet(failedVolumeSet).setFullVolumeSet(fullVolumeSet)
+          .build();
+    }
+    return StorageReportResult.ReportResultBuilder.newBuilder()
+        .setStatus(ReportStatus.ALL_IS_WELL).build();
   }
 
   private boolean isThresholdReached(UtilizationThreshold threshold,
-      SCMNodeStat stat) {
+      double scmUsedratio) {
     switch (threshold) {
     case NORMAL:
-      return stat.getScmUsedratio() < warningUtilizationThreshold;
+      return scmUsedratio < warningUtilizationThreshold;
     case WARN:
-      return stat.getScmUsedratio() >= warningUtilizationThreshold &&
-          stat.getScmUsedratio() < criticalUtilizationThreshold;
+      return scmUsedratio >= warningUtilizationThreshold
+          && scmUsedratio < criticalUtilizationThreshold;
     case CRITICAL:
-      return stat.getScmUsedratio() >= criticalUtilizationThreshold;
+      return scmUsedratio >= criticalUtilizationThreshold;
     default:
       throw new RuntimeException("Unknown UtilizationThreshold value");
     }
@@ -211,67 +242,120 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
 
   @Override
   public long getCapacity(UUID dnId) {
-    return scmNodeStorageStatMap.get(dnId).getCapacity().get();
+    long capacity = 0;
+    Set<StorageLocationReport> reportSet = scmNodeStorageReportMap.get(dnId);
+    for (StorageLocationReport report : reportSet) {
+      capacity += report.getCapacity();
+    }
+    return capacity;
   }
 
   @Override
   public long getRemainingSpace(UUID dnId) {
-    return scmNodeStorageStatMap.get(dnId).getRemaining().get();
+    long remaining = 0;
+    Set<StorageLocationReport> reportSet = scmNodeStorageReportMap.get(dnId);
+    for (StorageLocationReport report : reportSet) {
+      remaining += report.getRemaining();
+    }
+    return remaining;
   }
 
   @Override
   public long getUsedSpace(UUID dnId) {
-    return scmNodeStorageStatMap.get(dnId).getScmUsed().get();
+    long scmUsed = 0;
+    Set<StorageLocationReport> reportSet = scmNodeStorageReportMap.get(dnId);
+    for (StorageLocationReport report : reportSet) {
+      scmUsed += report.getScmUsed();
+    }
+    return scmUsed;
   }
 
   @Override
   public long getTotalCapacity() {
-    return clusterStat.getCapacity().get();
+    long capacity = 0;
+    Set<UUID> dnIdSet = scmNodeStorageReportMap.keySet();
+    for (UUID id : dnIdSet) {
+      capacity += getCapacity(id);
+    }
+    return capacity;
   }
 
   @Override
   public long getTotalSpaceUsed() {
-    return clusterStat.getScmUsed().get();
+    long scmUsed = 0;
+    Set<UUID> dnIdSet = scmNodeStorageReportMap.keySet();
+    for (UUID id : dnIdSet) {
+      scmUsed += getUsedSpace(id);
+    }
+    return scmUsed;
   }
 
   @Override
   public long getTotalFreeSpace() {
-    return clusterStat.getRemaining().get();
+    long remaining = 0;
+    Set<UUID> dnIdSet = scmNodeStorageReportMap.keySet();
+    for (UUID id : dnIdSet) {
+      remaining += getRemainingSpace(id);
+    }
+    return remaining;
   }
 
   /**
-   * removes the dataNode from scmNodeStorageStatMap
+   * removes the dataNode from scmNodeStorageReportMap
    * @param datanodeID
    * @throws SCMException in case the dataNode is not found in the map.
    */
   public void removeDatanode(UUID datanodeID) throws SCMException {
     Preconditions.checkNotNull(datanodeID);
-    synchronized (scmNodeStorageStatMap) {
-      if (!scmNodeStorageStatMap.containsKey(datanodeID)) {
+    synchronized (scmNodeStorageReportMap) {
+      if (!scmNodeStorageReportMap.containsKey(datanodeID)) {
         throw new SCMException("No such datanode", NO_SUCH_DATANODE);
       }
-      SCMNodeStat stat = scmNodeStorageStatMap.remove(datanodeID);
-      clusterStat.subtract(stat);
+      scmNodeStorageReportMap.remove(datanodeID);
     }
   }
 
   /**
-   * Gets the SCMNodeStat for the datanode
+   * Returns the set of storage volumes for a Datanode.
    * @param  datanodeID
-   * @return SCMNodeStat
+   * @return set of storage volumes.
    */
 
-  SCMNodeStat getNodeStat(UUID datanodeID) {
-    return scmNodeStorageStatMap.get(datanodeID);
+  @Override
+  public Set<StorageLocationReport> getStorageVolumes(UUID datanodeID) {
+    return scmNodeStorageReportMap.get(datanodeID);
   }
 
+
+  /**
+   * Truncate to 4 digits since uncontrolled precision is sometimes
+   * counterintuitive to what users expect.
+   * @param value - double.
+   * @return double.
+   */
+  private double truncateDecimals(double value) {
+    final int multiplier = 10000;
+    return (double) ((long) (value * multiplier)) / multiplier;
+  }
+
+  /**
+   * Gets the scmUsed ratio.
+   */
+  public double getScmUsedratio(long scmUsed, long capacity) {
+    double scmUsedRatio =
+        truncateDecimals(scmUsed / (double) capacity);
+    return scmUsedRatio;
+  }
+
   /**
    * Results possible from processing a Node report by
    * Node2ContainerMapper.
    */
-  public enum NodeReportStatus {
+  public enum ReportStatus {
     ALL_IS_WELL,
-    DATANODE_OUT_OF_SPACE
+    DATANODE_OUT_OF_SPACE,
+    STORAGE_OUT_OF_SPACE,
+    FAILED_STORAGE,
+    FAILED_AND_OUT_OF_SPACE_STORAGE
   }
 
-}
+}
\ No newline at end of file
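
With the map reworked as above, processNodeReport returns a
StorageReportResult instead of a bare status enum, so callers can see which
volumes are full or failed. A sketch of how a caller might branch on it (the
two handler methods are invented for illustration; the IOException thrown by
processNodeReport is assumed to be handled further up):

    StorageReportResult result = statMap.processNodeReport(dnId, nodeReport);
    switch (result.getStatus()) {
    case DATANODE_OUT_OF_SPACE:
    case STORAGE_OUT_OF_SPACE:
      // Stop placing new containers on the volumes that ran out of space.
      avoidFullVolumes(result.getFullVolumes());      // hypothetical helper
      break;
    case FAILED_STORAGE:
    case FAILED_AND_OUT_OF_SPACE_STORAGE:
      // Trigger re-replication of data that lived on the failed volumes.
      handleFailedVolumes(result.getFailedVolumes()); // hypothetical helper
      break;
    case ALL_IS_WELL:
    default:
      break;
    }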

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
new file mode 100644
index 0000000..3436e77
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
@@ -0,0 +1,87 @@
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.node;
+
+import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
+
+import java.util.Set;
+
+/**
+ * A storage report gets processed by SCMNodeStorageStatMap and the
+ * outcome is returned as this report result class.
+ */
+public class StorageReportResult {
+  private SCMNodeStorageStatMap.ReportStatus status;
+  private Set<StorageLocationReport> fullVolumes;
+  private Set<StorageLocationReport> failedVolumes;
+
+  StorageReportResult(SCMNodeStorageStatMap.ReportStatus status,
+      Set<StorageLocationReport> fullVolumes,
+      Set<StorageLocationReport> failedVolumes) {
+    this.status = status;
+    this.fullVolumes = fullVolumes;
+    this.failedVolumes = failedVolumes;
+  }
+
+  public SCMNodeStorageStatMap.ReportStatus getStatus() {
+    return status;
+  }
+
+  public Set<StorageLocationReport> getFullVolumes() {
+    return fullVolumes;
+  }
+
+  public Set<StorageLocationReport> getFailedVolumes() {
+    return failedVolumes;
+  }
+
+  static class ReportResultBuilder {
+    private SCMNodeStorageStatMap.ReportStatus status;
+    private Set<StorageLocationReport> fullVolumes;
+    private Set<StorageLocationReport> failedVolumes;
+
+    static ReportResultBuilder newBuilder() {
+      return new ReportResultBuilder();
+    }
+
+    public ReportResultBuilder setStatus(
+        SCMNodeStorageStatMap.ReportStatus newstatus) {
+      this.status = newstatus;
+      return this;
+    }
+
+    public ReportResultBuilder setFullVolumeSet(
+        Set<StorageLocationReport> fullVolumes) {
+      this.fullVolumes = fullVolumes;
+      return this;
+    }
+
+    public ReportResultBuilder setFailedVolumeSet(
+        Set<StorageLocationReport> failedVolumes) {
+      this.failedVolumes = failedVolumes;
+      return this;
+    }
+
+    StorageReportResult build() {
+      return new StorageReportResult(status, fullVolumes, failedVolumes);
+    }
+  }
+}
\ No newline at end of file
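
A minimal construction through the builder, mirroring its use in
processNodeReport above (failedVolumes is an existing
Set<StorageLocationReport> in this sketch):

    StorageReportResult result = StorageReportResult.ReportResultBuilder
        .newBuilder()
        .setStatus(SCMNodeStorageStatMap.ReportStatus.FAILED_STORAGE)
        .setFailedVolumeSet(failedVolumes)
        .build();

Note that any volume set left unset stays null, so consumers of
getFullVolumes() and getFailedVolumes() need to guard against null.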

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
index 2fa786b..571de77 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
@@ -17,38 +17,56 @@
  */
 package org.apache.hadoop.hdds.scm.node;
 
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto.
+    StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+import org.apache.hadoop.hdds.protocol.proto.
+    StorageContainerDatanodeProtocolProtos.SCMStorageReport;
 import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.*;
 import org.junit.rules.ExpectedException;
 
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
+import java.util.Set;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.io.IOException;
 import java.util.concurrent.ConcurrentHashMap;
 
 public class TestSCMNodeStorageStatMap {
-  private final static int DATANODE_COUNT = 300;
+  private final static int DATANODE_COUNT = 100;
   final long capacity = 10L * OzoneConsts.GB;
   final long used = 2L * OzoneConsts.GB;
   final long remaining = capacity - used;
   private static OzoneConfiguration conf = new OzoneConfiguration();
-  private final Map<UUID, SCMNodeStat> testData = new ConcurrentHashMap<>();
+  private final Map<UUID, Set<StorageLocationReport>> testData =
+      new ConcurrentHashMap<>();
 
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
   private void generateData() {
-    SCMNodeStat stat = new SCMNodeStat();
-    stat.set(capacity, used, remaining);
     for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
-      testData.put(UUID.randomUUID(), stat);
+      UUID dnId = UUID.randomUUID();
+      Set<StorageLocationReport> reportSet = new HashSet<>();
+      String path = GenericTestUtils.getTempPath(
+          TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + Integer
+              .toString(dnIndex));
+      StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
+      builder.setStorageType(StorageType.DISK).setId(dnId.toString())
+          .setStorageLocation(path).setScmUsed(used).setRemaining(remaining)
+          .setCapacity(capacity).setFailed(false);
+      reportSet.add(builder.build());
+      testData.put(dnId, reportSet);
     }
   }
 
@@ -70,8 +88,8 @@ public class TestSCMNodeStorageStatMap {
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
     UUID knownNode = getFirstKey();
     UUID unknownNode = UUID.randomUUID();
-    SCMNodeStat stat = testData.get(knownNode);
-    map.insertNewDatanode(knownNode, stat);
+    Set<StorageLocationReport> report = testData.get(knownNode);
+    map.insertNewDatanode(knownNode, report);
     Assert.assertTrue("Not able to detect a known node",
         map.isKnownDatanode(knownNode));
     Assert.assertFalse("Unknown node detected",
@@ -82,54 +100,89 @@ public class TestSCMNodeStorageStatMap {
   public void testInsertNewDatanode() throws SCMException {
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
     UUID knownNode = getFirstKey();
-    SCMNodeStat stat = testData.get(knownNode);
-    map.insertNewDatanode(knownNode, stat);
-    Assert.assertEquals(map.getNodeStat(knownNode).getScmUsed(),
-        testData.get(knownNode).getScmUsed());
+    Set<StorageLocationReport> report = testData.get(knownNode);
+    map.insertNewDatanode(knownNode, report);
+    Assert.assertEquals(map.getStorageVolumes(knownNode),
+        testData.get(knownNode));
     thrown.expect(SCMException.class);
     thrown.expectMessage("already exists");
-    map.insertNewDatanode(knownNode, stat);
+    map.insertNewDatanode(knownNode, report);
   }
 
   @Test
   public void testUpdateUnknownDatanode() throws SCMException {
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
     UUID unknownNode = UUID.randomUUID();
-    SCMNodeStat stat = new SCMNodeStat();
-
+    String path = GenericTestUtils.getTempPath(
+        TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + unknownNode
+            .toString());
+    Set<StorageLocationReport> reportSet = new HashSet<>();
+    StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
+    builder.setStorageType(StorageType.DISK).setId(unknownNode.toString())
+        .setStorageLocation(path).setScmUsed(used).setRemaining(remaining)
+        .setCapacity(capacity).setFailed(false);
+    reportSet.add(builder.build());
     thrown.expect(SCMException.class);
     thrown.expectMessage("No such datanode");
-    map.updateDatanodeMap(unknownNode, stat);
+    map.updateDatanodeMap(unknownNode, reportSet);
   }
 
   @Test
-  public void testProcessNodeReportCheckOneNode() throws SCMException {
+  public void testProcessNodeReportCheckOneNode() throws IOException {
     UUID key = getFirstKey();
-    SCMNodeStat value = testData.get(key);
+    List<SCMStorageReport> reportList = new ArrayList<>();
+    Set<StorageLocationReport> reportSet = testData.get(key);
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
-    map.insertNewDatanode(key, value);
+    map.insertNewDatanode(key, reportSet);
     Assert.assertTrue(map.isKnownDatanode(key));
     String storageId = UUID.randomUUID().toString();
     String path =
         GenericTestUtils.getRandomizedTempPath().concat("/" + storageId);
-    long capacity = value.getCapacity().get();
-    long used = value.getScmUsed().get();
-    long remaining = value.getRemaining().get();
+    StorageLocationReport report = reportSet.iterator().next();
+    long capacity = report.getCapacity();
+    long used = report.getScmUsed();
+    long remaining = report.getRemaining();
     List<SCMStorageReport> reports = TestUtils
         .createStorageReport(capacity, used, remaining, path, null, storageId,
             1);
-    SCMNodeStorageStatMap.NodeReportStatus status =
+    StorageReportResult result =
         map.processNodeReport(key, TestUtils.createNodeReport(reports));
-    Assert.assertEquals(status,
-        SCMNodeStorageStatMap.NodeReportStatus.ALL_IS_WELL);
+    Assert.assertEquals(result.getStatus(),
+        SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
+    StorageContainerDatanodeProtocolProtos.SCMNodeReport.Builder nrb =
+        SCMNodeReport.newBuilder();
+    SCMStorageReport srb = reportSet.iterator().next().getProtoBufMessage();
+    reportList.add(srb);
+    result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
+    Assert.assertEquals(result.getStatus(),
+        SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
+
+    reportList.add(TestUtils
+        .createStorageReport(capacity, capacity, 0, path, null,
+            UUID.randomUUID().toString(), 1).get(0));
+    result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
+    Assert.assertEquals(result.getStatus(),
+        SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE);
+    // Mark a disk as failed.
+    SCMStorageReport srb2 = SCMStorageReport.newBuilder()
+        .setStorageUuid(UUID.randomUUID().toString())
+        .setStorageLocation(srb.getStorageLocation()).setScmUsed(capacity)
+        .setCapacity(capacity).setRemaining(0).setFailed(true).build();
+    reportList.add(srb2);
+    nrb.addAllStorageReport(reportList);
+    result = map.processNodeReport(key, nrb.addStorageReport(srb).build());
+    Assert.assertEquals(result.getStatus(),
+        SCMNodeStorageStatMap.ReportStatus.FAILED_AND_OUT_OF_SPACE_STORAGE);
+
   }
 
   @Test
-  public void testProcessNodeReportAndSCMStats() throws SCMException {
+  public void testProcessMultipleNodeReports() throws SCMException {
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
     int counter = 1;
     // Insert all testData into the SCMNodeStorageStatMap Map.
-    for (Map.Entry<UUID, SCMNodeStat> keyEntry : testData.entrySet()) {
+    for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData
+        .entrySet()) {
       map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue());
     }
     Assert.assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity());
@@ -137,9 +190,21 @@ public class TestSCMNodeStorageStatMap {
     Assert.assertEquals(DATANODE_COUNT * used, map.getTotalSpaceUsed());
 
     // update 1/4th of the datanodes to be full
-    for (Map.Entry<UUID, SCMNodeStat> keyEntry : testData.entrySet()) {
-      SCMNodeStat stat = new SCMNodeStat(capacity, capacity, 0);
-      map.updateDatanodeMap(keyEntry.getKey(), stat);
+    for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData
+        .entrySet()) {
+      Set<StorageLocationReport> reportSet = new HashSet<>();
+      String path = GenericTestUtils.getTempPath(
+          TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + keyEntry
+              .getKey().toString());
+      StorageLocationReport.Builder builder =
+          StorageLocationReport.newBuilder();
+      builder.setStorageType(StorageType.DISK)
+          .setId(keyEntry.getKey().toString()).setStorageLocation(path)
+          .setScmUsed(capacity).setRemaining(0).setCapacity(capacity)
+          .setFailed(false);
+      reportSet.add(builder.build());
+
+      map.updateDatanodeMap(keyEntry.getKey(), reportSet);
       counter++;
       if (counter > DATANODE_COUNT / 4) {
         break;
@@ -163,7 +228,8 @@ public class TestSCMNodeStorageStatMap {
         map.getTotalSpaceUsed(), 0);
     counter = 1;
     // Remove 1/4 of the DataNodes from the Map
-    for (Map.Entry<UUID, SCMNodeStat> keyEntry : testData.entrySet()) {
+    for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData
+        .entrySet()) {
       map.removeDatanode(keyEntry.getKey());
       counter++;
       if (counter > DATANODE_COUNT / 4) {
@@ -181,12 +247,13 @@ public class TestSCMNodeStorageStatMap {
         map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL)
             .size(), 0);
 
-    Assert.assertEquals(0.75 * DATANODE_COUNT * capacity, map.getTotalCapacity(), 0);
+    Assert
+        .assertEquals(0.75 * DATANODE_COUNT * capacity, map.getTotalCapacity(),
+            0);
     Assert.assertEquals(0.75 * DATANODE_COUNT * remaining,
         map.getTotalFreeSpace(), 0);
-    Assert.assertEquals(
-        0.75 * DATANODE_COUNT * used ,
-        map.getTotalSpaceUsed(), 0);
+    Assert
+        .assertEquals(0.75 * DATANODE_COUNT * used, map.getTotalSpaceUsed(), 0);
 
   }
 }
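
As a quick sanity check of the ratio math exercised here: with the test
constants, used / capacity = 2 GB / 10 GB = 0.2, which the map truncates to
four decimal places before comparing against the configurable warning and
critical thresholds (the threshold values below are invented):

    long capacity = 10L * OzoneConsts.GB;
    long used = 2L * OzoneConsts.GB;
    double ratio = used / (double) capacity;  // 0.2
    // 0.2 < 0.75 (an assumed warning threshold), so such a node reports
    // ALL_IS_WELL; updating used to equal capacity pushes the ratio to 1.0
    // and lands in the CRITICAL branch, as the "full" updates above exercise.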




[22/50] [abbrv] hadoop git commit: YARN-8369. Javadoc build failed due to 'bad use of >'. (Takanobu Asanuma via wangda)

Posted by xy...@apache.org.
YARN-8369. Javadoc build failed due to 'bad use of >'. (Takanobu Asanuma via wangda)

Change-Id: I79a42154e8f86ab1c3cc939b3745024b8eebe5f4


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17aa40f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17aa40f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17aa40f6

Branch: refs/heads/HDDS-4
Commit: 17aa40f669f197d43387d67dc00040d14cd00948
Parents: 3061bfc
Author: Wangda Tan <wa...@apache.org>
Authored: Tue May 29 09:27:36 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue May 29 09:27:36 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/yarn/util/resource/ResourceCalculator.java | 4 ++--
 .../monitor/capacity/CapacitySchedulerPreemptionUtils.java   | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17aa40f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
index 51078cd..27394f7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
@@ -260,10 +260,10 @@ public abstract class ResourceCalculator {
 
   /**
    * Check if resource has any major resource types (which are all NodeManagers
-   * included) has a >0 value.
+   * included) has a {@literal >} 0 value.
    *
    * @param resource resource
-   * @return returns true if any resource is >0
+   * @return true if any resource is {@literal >} 0
    */
   public abstract boolean isAnyMajorResourceAboveZero(Resource resource);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17aa40f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
index 5396d61..690eb02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
@@ -136,12 +136,12 @@ public class CapacitySchedulerPreemptionUtils {
    * @param conservativeDRF
    *          should we do conservativeDRF preemption or not.
    *          When true:
-   *            stop preempt container when any major resource type <= 0 for to-
-   *            preempt.
+   *            stop preempting containers when any major resource type
+   *            is {@literal <=} 0 for the to-preempt resource.
    *            This is default preemption behavior of intra-queue preemption
    *          When false:
-   *            stop preempt container when: all major resource type <= 0 for
-   *            to-preempt.
+   *            stop preempting containers when all major resource types
+   *            are {@literal <=} 0 for the to-preempt resource.
    *            This is default preemption behavior of inter-queue preemption
    * @return should we preempt rmContainer. If we should, deduct from
    *         <code>resourceToObtainByPartition</code>
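
For reference, the escaping pattern applied by this patch, shown in a
self-contained sketch (the method is invented):

    /**
     * Returns true when used {@literal <=} capacity. The {@literal} inline
     * tag keeps the Javadoc parser from treating "<=" as the start of an
     * HTML tag; writing &lt;= achieves the same effect.
     */
    boolean fitsWithin(long used, long capacity) {
      return used <= capacity;
    }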




[12/50] [abbrv] hadoop git commit: HDFS-13628. Update Archival Storage doc for Provided Storage

Posted by xy...@apache.org.
HDFS-13628. Update Archival Storage doc for Provided Storage

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04757e58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04757e58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04757e58

Branch: refs/heads/HDDS-4
Commit: 04757e5864bd4904fd5a59d143fff480814700e4
Parents: 88cbe57
Author: Takanobu Asanuma <ta...@yahoo-corp.jp>
Authored: Mon May 28 19:04:36 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon May 28 19:06:34 2018 +0900

----------------------------------------------------------------------
 .../hadoop-hdfs/src/site/markdown/ArchivalStorage.md             | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04757e58/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index ab7975a..3c49cb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -35,7 +35,7 @@ A new storage type *ARCHIVE*, which has high storage density (petabyte of storag
 
 Another new storage type *RAM\_DISK* is added for supporting writing single replica files in memory.
 
-### Storage Policies: Hot, Warm, Cold, All\_SSD, One\_SSD and Lazy\_Persist
+### Storage Policies: Hot, Warm, Cold, All\_SSD, One\_SSD, Lazy\_Persist and Provided
 
 A new concept of storage policies is introduced in order to allow files to be stored in different storage types according to the storage policy.
 
@@ -47,6 +47,7 @@ We have the following storage policies:
 * **All\_SSD** - for storing all replicas in SSD.
 * **One\_SSD** - for storing one of the replicas in SSD. The remaining replicas are stored in DISK.
 * **Lazy\_Persist** - for writing blocks with single replica in memory. The replica is first written in RAM\_DISK and then it is lazily persisted in DISK.
+* **Provided** - for storing data outside HDFS. See also [HDFS Provided Storage](./HdfsProvidedStorage.html).
 
 More formally, a storage policy consists of the following fields:
 
@@ -68,6 +69,7 @@ The following is a typical storage policy table.
 | 7 | Hot (default) | DISK: *n* | \<none\> | ARCHIVE |
 | 5 | Warm | DISK: 1, ARCHIVE: *n*-1 | ARCHIVE, DISK | ARCHIVE, DISK |
 | 2 | Cold | ARCHIVE: *n* | \<none\> | \<none\> |
+| 1 | Provided | PROVIDED: 1, DISK: *n*-1 | PROVIDED, DISK | PROVIDED, DISK |
 
 Note 1: The Lazy\_Persist policy is useful only for single replica blocks. For blocks with more than one replica, all the replicas will be written to DISK since writing only one of the replicas to RAM\_DISK does not improve the overall performance.
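
A short sketch of putting a policy to use programmatically (the path is
invented; PROVIDED is the policy name from the table above, and the same can
be done from the shell via "hdfs storagepolicies -setStoragePolicy"):

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dir = new Path("/data/external");
    fs.setStoragePolicy(dir, "PROVIDED");
    // Verify the assignment; getStoragePolicy returns a BlockStoragePolicySpi.
    System.out.println(fs.getStoragePolicy(dir).getName());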
 




[04/50] [abbrv] hadoop git commit: HDDS-96. Add an option in ozone script to generate a site file with minimally required ozone configs. Contributed by Dinesh Chitlangia.

Posted by xy...@apache.org.
HDDS-96. Add an option in ozone script to generate a site file with minimally required ozone configs.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8733012a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8733012a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8733012a

Branch: refs/heads/HDDS-4
Commit: 8733012ae35f2762d704f94975a762885d116795
Parents: 1e0d4b1
Author: Anu Engineer <ae...@apache.org>
Authored: Fri May 25 13:06:14 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Fri May 25 13:06:14 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdds/conf/OzoneConfiguration.java    |   6 +-
 hadoop-ozone/common/src/main/bin/ozone          |   4 +
 ...TestGenerateOzoneRequiredConfigurations.java | 100 +++++++++++
 .../GenerateOzoneRequiredConfigurations.java    | 174 +++++++++++++++++++
 .../hadoop/ozone/genconf/package-info.java      |  24 +++
 5 files changed, 305 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index f07718c..36d953c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -137,7 +137,7 @@ public class OzoneConfiguration extends Configuration {
 
     @Override
     public String toString() {
-      return this.getName() + " " + this.getValue() + this.getTag();
+      return this.getName() + " " + this.getValue() + " " + this.getTag();
     }
 
     @Override
@@ -152,11 +152,11 @@ public class OzoneConfiguration extends Configuration {
     }
   }
 
-  public static void activate(){
+  public static void activate() {
     // adds the default resources
     Configuration.addDefaultResource("hdfs-default.xml");
     Configuration.addDefaultResource("hdfs-site.xml");
     Configuration.addDefaultResource("ozone-default.xml");
     Configuration.addDefaultResource("ozone-site.xml");
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-ozone/common/src/main/bin/ozone
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone
index 00261c7..6843bdd 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -47,6 +47,7 @@ function hadoop_usage
   hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service"
   hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager "
   hadoop_add_subcommand "version" client "print the version"
+  hadoop_add_subcommand "genconf" client "generate minimally required ozone configs and output to ozone-site.xml in specified path"
 
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
 }
@@ -118,6 +119,9 @@ function ozonecmd_case
     version)
       HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
     ;;
+    genconf)
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.genconf.GenerateOzoneRequiredConfigurations
+    ;;
     *)
       HADOOP_CLASSNAME="${subcmd}"
       if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
new file mode 100644
index 0000000..82582a6
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.genconf;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+
+
+/**
+ * Tests GenerateOzoneRequiredConfigurations.
+ */
+public class TestGenerateOzoneRequiredConfigurations {
+  private static MiniOzoneCluster cluster;
+  private static OzoneConfiguration conf;
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Tests a valid path and generates ozone-site.xml.
+   * @throws Exception
+   */
+  @Test
+  public void generateConfigurationsSuccess() throws Exception {
+    String[] args = new String[]{"-output", "."};
+    GenerateOzoneRequiredConfigurations.main(args);
+
+    Assert.assertEquals("Path is valid",
+        true, GenerateOzoneRequiredConfigurations.isValidPath(args[1]));
+
+    Assert.assertEquals("Permission is valid",
+        true, GenerateOzoneRequiredConfigurations.canWrite(args[1]));
+
+    Assert.assertEquals("Config file generated",
+        0, GenerateOzoneRequiredConfigurations.generateConfigurations(args[1]));
+  }
+
+  /**
+   * Test to avoid generating ozone-site.xml when invalid permission.
+   * @throws Exception
+   */
+  @Test
+  public void generateConfigurationsFailure() throws Exception {
+    String[] args = new String[]{"-output", "/"};
+    GenerateOzoneRequiredConfigurations.main(args);
+
+    Assert.assertEquals("Path is valid",
+        true, GenerateOzoneRequiredConfigurations.isValidPath(args[1]));
+
+    Assert.assertEquals("Invalid permission",
+        false, GenerateOzoneRequiredConfigurations.canWrite(args[1]));
+
+    Assert.assertEquals("Config file not generated",
+        1, GenerateOzoneRequiredConfigurations.generateConfigurations(args[1]));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
new file mode 100644
index 0000000..6296c9d
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.genconf;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Marshaller;
+import java.io.File;
+import java.net.URL;
+import java.nio.file.InvalidPathException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * GenerateOzoneRequiredConfigurations - A tool to generate ozone-site.xml<br>
+ * This tool generates an ozone-site.xml with minimally required configs.
+ * This tool can be invoked as follows:<br>
+ * <ul>
+ * <li>ozone genconf -output &lt;Path to output file&gt;</li>
+ * <li>ozone genconf -help</li>
+ * </ul>
+ */
+public final class GenerateOzoneRequiredConfigurations {
+
+  private static final String OUTPUT = "-output";
+  private static final String HELP = "-help";
+  private static final String USAGE = "Usage: \nozone genconf "
+      + OUTPUT + " <Path to output file> \n"
+      + "ozone genconf "
+      + HELP;
+  private static final int SUCCESS = 0;
+  private static final int FAILURE = 1;
+
+  private GenerateOzoneRequiredConfigurations() {
+  }
+
+  /**
+   * Entry point for the genconf tool.
+   *
+   * @param args command-line arguments
+   */
+  public static void main(String[] args) {
+
+    try {
+      if (args.length == 0) {
+        System.out.println(USAGE);
+        System.exit(1);
+      }
+
+      switch (args[0]) {
+      case OUTPUT:
+        if (args.length > 1) {
+          generateConfigurations(args[1]);
+        } else {
+          System.out.println("Path to output file is mandatory");
+          System.out.println(USAGE);
+          System.exit(1);
+        }
+        break;
+
+      case HELP:
+        System.out.println(USAGE);
+        System.exit(0);
+        break;
+
+      default:
+        System.out.println(USAGE);
+        System.exit(1);
+      }
+
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+  }
+
+  /**
+   * Check if the path is valid.
+   *
+   * @param path
+   * @return true, if path is valid, else return false
+   */
+  public static boolean isValidPath(String path) {
+    try {
+      Paths.get(path);
+    } catch (InvalidPathException | NullPointerException ex) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Check if user has permission to write in the specified path.
+   *
+   * @param path
+   * @return true, if the user has permission to write, else returns false
+   */
+  public static boolean canWrite(String path) {
+    File file = new File(path);
+    return file.canWrite();
+  }
+
+  /**
+   * Generate ozone-site.xml at specified path.
+   *
+   * @param path
+   * @return SUCCESS(0) if file can be generated, else returns FAILURE(1)
+   * @throws JAXBException
+   */
+  public static int generateConfigurations(String path) throws JAXBException {
+
+    if (!isValidPath(path)) {
+      System.out.println("Invalid path or insufficient permission");
+      return FAILURE;
+    }
+
+    if (!canWrite(path)) {
+      System.out.println("Invalid path or insufficient permission");
+      return FAILURE;
+    }
+
+    OzoneConfiguration oc = new OzoneConfiguration();
+
+    ClassLoader cL = Thread.currentThread().getContextClassLoader();
+    if (cL == null) {
+      cL = OzoneConfiguration.class.getClassLoader();
+    }
+    URL url = cL.getResource("ozone-default.xml");
+
+    List<OzoneConfiguration.Property> allProperties =
+        oc.readPropertyFromXml(url);
+
+    List<OzoneConfiguration.Property> requiredProperties = new ArrayList<>();
+
+    for (OzoneConfiguration.Property p : allProperties) {
+      if (p.getTag() != null && p.getTag().contains("REQUIRED")) {
+        requiredProperties.add(p);
+      }
+    }
+
+    OzoneConfiguration.XMLConfiguration requiredConfig =
+        new OzoneConfiguration.XMLConfiguration();
+    requiredConfig.setProperties(requiredProperties);
+
+    JAXBContext context =
+        JAXBContext.newInstance(OzoneConfiguration.XMLConfiguration.class);
+    Marshaller m = context.createMarshaller();
+    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
+    m.marshal(requiredConfig, new File(path, "ozone-site.xml"));
+
+    System.out.println("ozone-site.xml has been generated at " + path);
+
+    return SUCCESS;
+  }
+}
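
Once built, the tool is invoked as "ozone genconf -output <path>", per the
USAGE string above. The same flow can be driven programmatically; a sketch
assuming an existing, writable output directory:

    try {
      String outDir = "/tmp/ozone-conf";  // invented location
      if (GenerateOzoneRequiredConfigurations.isValidPath(outDir)
          && GenerateOzoneRequiredConfigurations.canWrite(outDir)) {
        // Writes ozone-site.xml holding only the REQUIRED-tagged properties.
        GenerateOzoneRequiredConfigurations.generateConfigurations(outDir);
      }
    } catch (JAXBException e) {
      e.printStackTrace();
    }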

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/package-info.java
new file mode 100644
index 0000000..4817d39
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/package-info.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.genconf;
+
+/**
+ * Command line tool to generate required Ozone configs to an ozone-site.xml.
+ */
+

