Posted to common-commits@hadoop.apache.org by as...@apache.org on 2017/09/22 01:52:31 UTC

[01/50] hadoop git commit: HADOOP-14853. hadoop-mapreduce-client-app is not a client module (haibochen via rkanter) [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/YARN-6592 7d5bd3eb0 -> a0e16d41d (forced update)


HADOOP-14853. hadoop-mapreduce-client-app is not a client module (haibochen via rkanter)
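
With hadoop-mapreduce-client-app removed from the hadoop-client aggregator, downstream builds no longer pick up the MapReduce application-master classes transitively. A minimal sketch of declaring that dependency directly in a consumer's pom.xml, assuming the project defines its own hadoop.version property (an illustrative placeholder, not something this commit adds):

    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-mapreduce-client-app</artifactId>
      <!-- ${hadoop.version} is an assumed project property; use the Hadoop release you build against -->
      <version>${hadoop.version}</version>
    </dependency>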


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9b607da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9b607da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9b607da

Branch: refs/heads/YARN-6592
Commit: b9b607daa74322a2928f7671a985a60388b9b9c2
Parents: fbe06b5
Author: Robert Kanter <rk...@apache.org>
Authored: Fri Sep 15 13:43:39 2017 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Fri Sep 15 13:53:11 2017 -0700

----------------------------------------------------------------------
 hadoop-client-modules/hadoop-client/pom.xml | 49 ------------------------
 1 file changed, 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9b607da/hadoop-client-modules/hadoop-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client/pom.xml b/hadoop-client-modules/hadoop-client/pom.xml
index b802416..bed3f5c 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -131,55 +131,6 @@
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-app</artifactId>
-      <scope>compile</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>javax.servlet</groupId>
-          <artifactId>javax.servlet-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-yarn-server-nodemanager</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-yarn-server-web-proxy</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-annotations</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.google.inject.extensions</groupId>
-          <artifactId>guice-servlet</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>junit</groupId>
-          <artifactId>junit</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.avro</groupId>
-          <artifactId>avro</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-        <!-- No slf4j backends for downstream clients -->
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.zookeeper</groupId>
-          <artifactId>zookeeper</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-yarn-api</artifactId>
       <scope>compile</scope>
       <exclusions>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[09/50] hadoop git commit: YARN-7172. ResourceCalculator.fitsIn() should not take a cluster resource parameter. (Sen Zhao via wangda)

Posted by as...@apache.org.
YARN-7172. ResourceCalculator.fitsIn() should not take a cluster resource parameter. (Sen Zhao via wangda)

Change-Id: Icc3670c9381ce7591ca69ec12da5aa52d3612d34
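
For callers, the migration is mechanical: the cluster resource argument is simply dropped from ResourceCalculator.fitsIn() and from the Resources.fitsIn() helper. A minimal sketch of the new call shape, using the DefaultResourceCalculator and the Resource factory that appear in the diffs below (the values are illustrative only):

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
    import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
    import org.apache.hadoop.yarn.util.resource.Resources;

    ResourceCalculator rc = new DefaultResourceCalculator();
    Resource smaller = Resource.newInstance(1024, 1);   // 1024 MB, 1 vcore
    Resource bigger = Resource.newInstance(2048, 2);    // 2048 MB, 2 vcores

    // Before: rc.fitsIn(clusterResource, smaller, bigger)
    // After:  only the two resources being compared are needed
    boolean fits = rc.fitsIn(smaller, bigger);                     // true (memory-only check)
    boolean sameViaHelper = Resources.fitsIn(rc, smaller, bigger); // static helper, same result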


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e81596d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e81596d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e81596d0

Branch: refs/heads/YARN-6592
Commit: e81596d06d226f1cfa44b2390ce3095ed4dee621
Parents: 8d7cc22
Author: Wangda Tan <wa...@apache.org>
Authored: Sun Sep 17 21:20:43 2017 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Sun Sep 17 21:20:43 2017 -0700

----------------------------------------------------------------------
 .../resource/DefaultResourceCalculator.java     |  3 +-
 .../resource/DominantResourceCalculator.java    |  2 +-
 .../yarn/util/resource/ResourceCalculator.java  |  3 +-
 .../hadoop/yarn/util/resource/Resources.java    |  4 +--
 .../util/resource/TestResourceCalculator.java   | 24 +++++++-------
 .../server/resourcemanager/RMServerUtils.java   |  3 +-
 .../CapacitySchedulerPreemptionUtils.java       |  4 +--
 ...QueuePriorityContainerCandidateSelector.java |  5 ++-
 .../ReservedContainerCandidatesSelector.java    | 34 +++++++++-----------
 .../scheduler/capacity/AbstractCSQueue.java     |  2 +-
 .../allocator/RegularContainerAllocator.java    |  8 ++---
 .../scheduler/common/fica/FiCaSchedulerApp.java | 21 +++++-------
 .../scheduler/capacity/TestReservations.java    | 20 +++++-------
 13 files changed, 55 insertions(+), 78 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index bdf60bd..7f155e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -123,8 +123,7 @@ public class DefaultResourceCalculator extends ResourceCalculator {
   }
 
   @Override
-  public boolean fitsIn(Resource cluster,
-      Resource smaller, Resource bigger) {
+  public boolean fitsIn(Resource smaller, Resource bigger) {
     return smaller.getMemorySize() <= bigger.getMemorySize();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index d64f03e..ca828a5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -538,7 +538,7 @@ public class DominantResourceCalculator extends ResourceCalculator {
   }
 
   @Override
-  public boolean fitsIn(Resource cluster, Resource smaller, Resource bigger) {
+  public boolean fitsIn(Resource smaller, Resource bigger) {
     int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
     for (int i = 0; i < maxLength; i++) {
       ResourceInformation sResourceInformation = smaller

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
index 398dac5..d59560f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
@@ -225,8 +225,7 @@ public abstract class ResourceCalculator {
   /**
    * Check if a smaller resource can be contained by bigger resource.
    */
-  public abstract boolean fitsIn(Resource cluster,
-      Resource smaller, Resource bigger);
+  public abstract boolean fitsIn(Resource smaller, Resource bigger);
 
   /**
    * Check if resource has any major resource types (which are all NodeManagers

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 325bce4..793aebf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -437,9 +437,9 @@ public class Resources {
     return true;
   }
 
-  public static boolean fitsIn(ResourceCalculator rc, Resource cluster,
+  public static boolean fitsIn(ResourceCalculator rc,
       Resource smaller, Resource bigger) {
-    return rc.fitsIn(cluster, smaller, bigger);
+    return rc.fitsIn(smaller, bigger);
   }
   
   public static Resource componentwiseMin(Resource lhs, Resource rhs) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
index 5b4155c..5f3ed19 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
@@ -24,14 +24,13 @@ import java.util.Collection;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
 
 @RunWith(Parameterized.class)
 public class TestResourceCalculator {
@@ -64,29 +63,28 @@ public class TestResourceCalculator {
   
   @Test(timeout = 10000)
   public void testFitsIn() {
-    Resource cluster = Resource.newInstance(1024, 1);
 
     if (resourceCalculator instanceof DefaultResourceCalculator) {
-      assertTrue(resourceCalculator.fitsIn(cluster,
+      Assert.assertTrue(resourceCalculator.fitsIn(
           Resource.newInstance(1, 2), Resource.newInstance(2, 1)));
-      assertTrue(resourceCalculator.fitsIn(cluster,
+      Assert.assertTrue(resourceCalculator.fitsIn(
           Resource.newInstance(1, 2), Resource.newInstance(2, 2)));
-      assertTrue(resourceCalculator.fitsIn(cluster,
+      Assert.assertTrue(resourceCalculator.fitsIn(
           Resource.newInstance(1, 2), Resource.newInstance(1, 2)));
-      assertTrue(resourceCalculator.fitsIn(cluster,
+      Assert.assertTrue(resourceCalculator.fitsIn(
           Resource.newInstance(1, 2), Resource.newInstance(1, 1)));
-      assertFalse(resourceCalculator.fitsIn(cluster,
+      Assert.assertFalse(resourceCalculator.fitsIn(
           Resource.newInstance(2, 1), Resource.newInstance(1, 2)));
     } else if (resourceCalculator instanceof DominantResourceCalculator) {
-      assertFalse(resourceCalculator.fitsIn(cluster,
+      Assert.assertFalse(resourceCalculator.fitsIn(
           Resource.newInstance(1, 2), Resource.newInstance(2, 1)));
-      assertTrue(resourceCalculator.fitsIn(cluster,
+      Assert.assertTrue(resourceCalculator.fitsIn(
           Resource.newInstance(1, 2), Resource.newInstance(2, 2)));
-      assertTrue(resourceCalculator.fitsIn(cluster,
+      Assert.assertTrue(resourceCalculator.fitsIn(
           Resource.newInstance(1, 2), Resource.newInstance(1, 2)));
-      assertFalse(resourceCalculator.fitsIn(cluster,
+      Assert.assertFalse(resourceCalculator.fitsIn(
           Resource.newInstance(1, 2), Resource.newInstance(1, 1)));
-      assertFalse(resourceCalculator.fitsIn(cluster,
+      Assert.assertFalse(resourceCalculator.fitsIn(
           Resource.newInstance(2, 1), Resource.newInstance(1, 2)));
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index 2aae3a5..4934243 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -300,8 +300,7 @@ public class RMServerUtils {
     // Target resource of the increase request is more than NM can offer
     ResourceScheduler scheduler = rmContext.getScheduler();
     RMNode rmNode = request.getSchedulerNode().getRMNode();
-    if (!Resources.fitsIn(scheduler.getResourceCalculator(),
-        scheduler.getClusterResource(), targetResource,
+    if (!Resources.fitsIn(scheduler.getResourceCalculator(), targetResource,
         rmNode.getTotalCapability())) {
       String msg = "Target resource=" + targetResource + " of containerId="
           + containerId + " is more than node's total resource="

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
index 0ae3ef0..f097e9c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
@@ -156,8 +156,8 @@ public class CapacitySchedulerPreemptionUtils {
     if (null != toObtainByPartition
         && Resources.greaterThan(rc, clusterResource, toObtainByPartition,
             Resources.none())
-        && Resources.fitsIn(rc, clusterResource,
-            rmContainer.getAllocatedResource(), totalPreemptionAllowed)
+        && Resources.fitsIn(rc, rmContainer.getAllocatedResource(),
+            totalPreemptionAllowed)
         && !Resources.isAnyMajorResourceZero(rc, toObtainByPartition)) {
       Resources.subtractFrom(toObtainByPartition,
           rmContainer.getAllocatedResource());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
index c730a2d..7b7404c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
@@ -229,8 +229,7 @@ public class QueuePriorityContainerCandidateSelector
 
     // If we already can allocate the reserved container after preemption,
     // skip following steps
-    if (Resources.fitsIn(rc, clusterResource, lacking,
-        Resources.none())) {
+    if (Resources.fitsIn(rc, lacking, Resources.none())) {
       return true;
     }
 
@@ -270,7 +269,7 @@ public class QueuePriorityContainerCandidateSelector
       }
 
       // Lacking <= 0 means we can allocate the reserved container
-      if (Resources.fitsIn(rc, clusterResource, lacking, Resources.none())) {
+      if (Resources.fitsIn(rc, lacking, Resources.none())) {
         return true;
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java
index de23d0a..ff100d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java
@@ -87,8 +87,8 @@ public class ReservedContainerCandidatesSelector
 
     // Get list of nodes for preemption, ordered by preemption cost
     List<NodeForPreemption> nodesForPreemption = getNodesForPreemption(
-        clusterResource, queueToPreemptableResourceByPartition,
-        selectedCandidates, totalPreemptedResourceAllowed);
+        queueToPreemptableResourceByPartition, selectedCandidates,
+        totalPreemptedResourceAllowed);
 
     for (NodeForPreemption nfp : nodesForPreemption) {
       RMContainer reservedContainer = nfp.schedulerNode.getReservedContainer();
@@ -97,9 +97,8 @@ public class ReservedContainerCandidatesSelector
       }
 
       NodeForPreemption preemptionResult = getPreemptionCandidatesOnNode(
-          nfp.schedulerNode, clusterResource,
-          queueToPreemptableResourceByPartition, selectedCandidates,
-          totalPreemptedResourceAllowed, false);
+          nfp.schedulerNode, queueToPreemptableResourceByPartition,
+          selectedCandidates, totalPreemptedResourceAllowed, false);
       if (null != preemptionResult) {
         for (RMContainer c : preemptionResult.selectedContainers) {
           ApplicationAttemptId appId = c.getApplicationAttemptId();
@@ -135,8 +134,7 @@ public class ReservedContainerCandidatesSelector
     return preemptable;
   }
 
-  private boolean tryToPreemptFromQueue(Resource cluster, String queueName,
-      String partitionName,
+  private boolean tryToPreemptFromQueue(String queueName, String partitionName,
       Map<String, Map<String, Resource>> queueToPreemptableResourceByPartition,
       Resource required, Resource totalPreemptionAllowed, boolean readOnly) {
     Resource preemptable = getPreemptableResource(queueName, partitionName,
@@ -145,11 +143,11 @@ public class ReservedContainerCandidatesSelector
       return false;
     }
 
-    if (!Resources.fitsIn(rc, cluster, required, preemptable)) {
+    if (!Resources.fitsIn(rc, required, preemptable)) {
       return false;
     }
 
-    if (!Resources.fitsIn(rc, cluster, required, totalPreemptionAllowed)) {
+    if (!Resources.fitsIn(rc, required, totalPreemptionAllowed)) {
       return false;
     }
 
@@ -165,7 +163,6 @@ public class ReservedContainerCandidatesSelector
   /**
    * Try to check if we can preempt resources for reserved container in given node
    * @param node
-   * @param cluster
    * @param queueToPreemptableResourceByPartition it's a map of
    *                 <queueName, <partition, preemptable-resource>>
    * @param readOnly do we want to modify preemptable resource after we selected
@@ -174,7 +171,7 @@ public class ReservedContainerCandidatesSelector
    * to satisfy reserved resource
    */
   private NodeForPreemption getPreemptionCandidatesOnNode(
-      FiCaSchedulerNode node, Resource cluster,
+      FiCaSchedulerNode node,
       Map<String, Map<String, Resource>> queueToPreemptableResourceByPartition,
       Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates,
       Resource totalPreemptionAllowed, boolean readOnly) {
@@ -204,8 +201,7 @@ public class ReservedContainerCandidatesSelector
     String partition = node.getPartition();
 
     // Avoid preempt any container if required <= available + killable
-    if (Resources.fitsIn(rc, cluster, reservedContainer.getReservedResource(),
-        cur)) {
+    if (Resources.fitsIn(rc, reservedContainer.getReservedResource(), cur)) {
       return null;
     }
 
@@ -232,9 +228,9 @@ public class ReservedContainerCandidatesSelector
 
       // Can we preempt container c?
       // Check if we have quota to preempt this container
-      boolean canPreempt = tryToPreemptFromQueue(cluster, containerQueueName,
-          partition, queueToPreemptableResourceByPartition,
-          c.getAllocatedResource(), totalPreemptionAllowed, readOnly);
+      boolean canPreempt = tryToPreemptFromQueue(containerQueueName, partition,
+          queueToPreemptableResourceByPartition, c.getAllocatedResource(),
+          totalPreemptionAllowed, readOnly);
 
       // If we can, add to selected container, and change resource accordingly.
       if (canPreempt) {
@@ -246,7 +242,7 @@ public class ReservedContainerCandidatesSelector
           Resources.addTo(totalSelected, c.getAllocatedResource());
         }
         Resources.addTo(cur, c.getAllocatedResource());
-        if (Resources.fitsIn(rc, cluster,
+        if (Resources.fitsIn(rc,
             reservedContainer.getReservedResource(), cur)) {
           canAllocateReservedContainer = true;
           break;
@@ -282,7 +278,7 @@ public class ReservedContainerCandidatesSelector
     return nfp;
   }
 
-  private List<NodeForPreemption> getNodesForPreemption(Resource cluster,
+  private List<NodeForPreemption> getNodesForPreemption(
       Map<String, Map<String, Resource>> queueToPreemptableResourceByPartition,
       Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates,
       Resource totalPreemptionAllowed) {
@@ -292,7 +288,7 @@ public class ReservedContainerCandidatesSelector
     for (FiCaSchedulerNode node : preemptionContext.getScheduler()
         .getAllNodes()) {
       if (node.getReservedContainer() != null) {
-        NodeForPreemption nfp = getPreemptionCandidatesOnNode(node, cluster,
+        NodeForPreemption nfp = getPreemptionCandidatesOnNode(node,
             queueToPreemptableResourceByPartition, selectedCandidates,
             totalPreemptionAllowed, true);
         if (null != nfp) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index d7c452a..250f4e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -909,7 +909,7 @@ public abstract class AbstractCSQueue implements CSQueue {
           maxResourceLimit = labelManager.getResourceByLabel(
               schedulerContainer.getNodePartition(), cluster);
         }
-        if (!Resources.fitsIn(resourceCalculator, cluster,
+        if (!Resources.fitsIn(resourceCalculator,
             Resources.add(queueUsage.getUsed(partition), netAllocated),
             maxResourceLimit)) {
           if (LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
index f753d31..72dfbdd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
@@ -548,10 +548,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
         toKillContainers.add(killableContainer);
         Resources.addTo(availableAndKillable,
                         killableContainer.getAllocatedResource());
-        if (Resources.fitsIn(rc,
-                             clusterResource,
-                             capability,
-                             availableAndKillable)) {
+        if (Resources.fitsIn(rc, capability, availableAndKillable)) {
           // Stop if we find enough spaces
           availableContainers = 1;
           break;
@@ -579,8 +576,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
             // under the limit.
             resourceNeedToUnReserve = capability;
           }
-          unreservedContainer =
-              application.findNodeToUnreserve(clusterResource, node,
+          unreservedContainer = application.findNodeToUnreserve(node,
                   schedulerKey, resourceNeedToUnReserve);
           // When (minimum-unreserved-resource > 0 OR we cannot allocate
           // new/reserved

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 17bb104..a12c5ec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -314,7 +314,6 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
   }
 
   private boolean commonCheckContainerAllocation(
-      Resource cluster,
       ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode> allocation,
       SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode> schedulerContainer) {
     // Make sure node is not reserved by anyone else
@@ -355,8 +354,7 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
         }
       }
     }
-    if (!Resources.fitsIn(rc, cluster,
-        allocation.getAllocatedOrReservedResource(),
+    if (!Resources.fitsIn(rc, allocation.getAllocatedOrReservedResource(),
         availableResource)) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Node doesn't have enough available resource, asked="
@@ -419,8 +417,7 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
 
           // Common part of check container allocation regardless if it is a
           // increase container or regular container
-          commonCheckContainerAllocation(cluster, allocation,
-              schedulerContainer);
+          commonCheckContainerAllocation(allocation, schedulerContainer);
         } else {
           // Container reserved first time will be NEW, after the container
           // accepted & confirmed, it will become RESERVED state
@@ -721,9 +718,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
   }
 
   @VisibleForTesting
-  public NodeId getNodeIdToUnreserve(
-      SchedulerRequestKey schedulerKey, Resource resourceNeedUnreserve,
-      ResourceCalculator rc, Resource clusterResource) {
+  public NodeId getNodeIdToUnreserve(SchedulerRequestKey schedulerKey,
+      Resource resourceNeedUnreserve, ResourceCalculator resourceCalculator) {
     // first go around make this algorithm simple and just grab first
     // reservation that has enough resources
     Map<NodeId, RMContainer> reservedContainers = this.reservedContainers.get(
@@ -738,7 +734,7 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
 
         // make sure we unreserve one with at least the same amount of
         // resources, otherwise could affect capacity limits
-        if (Resources.fitsIn(rc, clusterResource, resourceNeedUnreserve,
+        if (Resources.fitsIn(resourceCalculator, resourceNeedUnreserve,
             reservedResource)) {
           if (LOG.isDebugEnabled()) {
             LOG.debug(
@@ -806,14 +802,13 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
   }
 
   @VisibleForTesting
-  public RMContainer findNodeToUnreserve(Resource clusterResource,
-      FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
-      Resource minimumUnreservedResource) {
+  public RMContainer findNodeToUnreserve(FiCaSchedulerNode node,
+      SchedulerRequestKey schedulerKey, Resource minimumUnreservedResource) {
     try {
       readLock.lock();
       // need to unreserve some other container first
       NodeId idToUnreserve = getNodeIdToUnreserve(schedulerKey,
-          minimumUnreservedResource, rc, clusterResource);
+          minimumUnreservedResource, rc);
       if (idToUnreserve == null) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("checked to see if could unreserve for app but nothing "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
index 5e6548b..32f022f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
@@ -896,8 +896,7 @@ public class TestReservations {
     String host_1 = "host_1";
     FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0,
         8 * GB);
-    
-    Resource clusterResource = Resources.createResource(2 * 8 * GB);
+
 
     // Setup resource-requests
     Priority p = TestUtils.createMockPriority(5);
@@ -933,28 +932,27 @@ public class TestReservations {
         node_0.getNodeID(), "user", rmContext);
 
     // no reserved containers
-    NodeId unreserveId =
-        app_0.getNodeIdToUnreserve(priorityMap, capability,
-            cs.getResourceCalculator(), clusterResource);
+    NodeId unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability,
+            cs.getResourceCalculator());
     assertEquals(null, unreserveId);
 
     // no reserved containers - reserve then unreserve
     app_0.reserve(node_0, priorityMap, rmContainer_1, container_1);
     app_0.unreserve(priorityMap, node_0, rmContainer_1);
     unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability,
-        cs.getResourceCalculator(), clusterResource);
+        cs.getResourceCalculator());
     assertEquals(null, unreserveId);
 
     // no container large enough is reserved
     app_0.reserve(node_0, priorityMap, rmContainer_1, container_1);
     unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability,
-        cs.getResourceCalculator(), clusterResource);
+        cs.getResourceCalculator());
     assertEquals(null, unreserveId);
 
     // reserve one that is now large enough
     app_0.reserve(node_1, priorityMap, rmContainer, container);
     unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability,
-        cs.getResourceCalculator(), clusterResource);
+        cs.getResourceCalculator());
     assertEquals(node_1.getNodeID(), unreserveId);
   }
 
@@ -1001,16 +999,14 @@ public class TestReservations {
         node_1.getNodeID(), "user", rmContext);
 
     // nothing reserved
-    RMContainer toUnreserveContainer =
-        app_0.findNodeToUnreserve(csContext.getClusterResource(), node_1,
+    RMContainer toUnreserveContainer = app_0.findNodeToUnreserve(node_1,
             priorityMap, capability);
     assertTrue(toUnreserveContainer == null);
 
     // reserved but scheduler doesn't know about that node.
     app_0.reserve(node_1, priorityMap, rmContainer, container);
     node_1.reserveResource(app_0, priorityMap, rmContainer);
-    toUnreserveContainer =
-        app_0.findNodeToUnreserve(csContext.getClusterResource(), node_1,
+    toUnreserveContainer = app_0.findNodeToUnreserve(node_1,
             priorityMap, capability);
     assertTrue(toUnreserveContainer == null);
   }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[31/50] hadoop git commit: HDFS-12445. Correct spellings of choosen to chosen. Contributed by hu xiaodong.

Posted by as...@apache.org.
HDFS-12445. Correct spellings of choosen to chosen. Contributed by hu xiaodong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51edaacd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51edaacd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51edaacd

Branch: refs/heads/YARN-6592
Commit: 51edaacd09d86419f99ca96545a1393db1f43f73
Parents: 59830ca
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Sep 19 13:48:23 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Sep 19 13:48:23 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java  | 4 ++--
 .../org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java | 2 +-
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java    | 2 +-
 .../org/apache/hadoop/examples/dancing/DistributedPentomino.java | 4 ++--
 .../main/java/org/apache/hadoop/examples/dancing/Pentomino.java  | 2 +-
 5 files changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f33ec63..0545bb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3551,8 +3551,8 @@ public class BlockManager implements BlockStatsMXBean {
     List<DatanodeStorageInfo> replicasToDelete = replicator
         .chooseReplicasToDelete(nonExcess, nonExcess, replication, excessTypes,
             addedNode, delNodeHint);
-    for (DatanodeStorageInfo choosenReplica : replicasToDelete) {
-      processChosenExcessRedundancy(nonExcess, choosenReplica, storedBlock);
+    for (DatanodeStorageInfo chosenReplica : replicasToDelete) {
+      processChosenExcessRedundancy(nonExcess, chosenReplica, storedBlock);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index b6c1318..1860565 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -178,7 +178,7 @@ public class TestDeadDatanode {
         clientNode, new HashSet<>(), 256 * 1024 * 1024L, null, (byte) 7,
         BlockType.CONTIGUOUS, null, null);
     for (DatanodeStorageInfo datanodeStorageInfo : results) {
-      assertFalse("Dead node should not be choosen", datanodeStorageInfo
+      assertFalse("Dead node should not be chosen", datanodeStorageInfo
           .getDatanodeDescriptor().equals(clientNode));
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
index 537b4d4..eef4461 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
@@ -368,7 +368,7 @@ public class DancingLinks<ColumnName> {
 
   /**
    * Make one move from a prefix
-   * @param goalRow the row that should be choosen
+   * @param goalRow the row that should be chosen
    * @return the row that was found
    */
   private Node<ColumnName> advance(int goalRow) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
index 29f1eb2..d4fe6dc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
@@ -39,8 +39,8 @@ import com.google.common.base.Charsets;
  * Launch a distributed pentomino solver.
  * It generates a complete list of prefixes of length N with each unique prefix
  * as a separate line. A prefix is a sequence of N integers that denote the 
- * index of the row that is choosen for each column in order. Note that the
- * next column is heuristically choosen by the solver, so it is dependant on
+ * index of the row that is chosen for each column in order. Note that the
+ * next column is heuristically chosen by the solver, so it is dependant on
  * the previous choice. That file is given as the input to
  * map/reduce. The output key/value are the move prefix/solution as Text/Text.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java
index 2485728..a30d62c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java
@@ -411,7 +411,7 @@ public class Pentomino {
   /**
    * Find all of the solutions that start with the given prefix. The printer
    * is given each solution as it is found.
-   * @param split a list of row indexes that should be choosen for each row
+   * @param split a list of row indexes that should be chosen for each row
    *        in order
    * @return the number of solutions found
    */


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[12/50] hadoop git commit: HDFS-12470. DiskBalancer: Some tests create plan files under system directory. Contributed by Hanisha Koneru.

Posted by as...@apache.org.
HDFS-12470. DiskBalancer: Some tests create plan files under system directory. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2dcba18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2dcba18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2dcba18

Branch: refs/heads/YARN-6592
Commit: a2dcba18531c6fa4b76325f5132773f12ddfc6d5
Parents: a4f9c7c
Author: Arpit Agarwal <ar...@apache.org>
Authored: Mon Sep 18 09:53:24 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Sep 18 09:53:24 2017 -0700

----------------------------------------------------------------------
 .../server/diskbalancer/command/TestDiskBalancerCommand.java    | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2dcba18/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index b0b0b0c..1cebae0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -476,9 +476,12 @@ public class TestDiskBalancerCommand {
   public void testPlanJsonNode() throws Exception {
     final String planArg = String.format("-%s %s", PLAN,
         "a87654a9-54c7-4693-8dd9-c9c7021dc340");
+    final Path testPath = new Path(
+        PathUtils.getTestPath(getClass()),
+        GenericTestUtils.getMethodName());
     final String cmdLine = String
         .format(
-            "hdfs diskbalancer %s", planArg);
+            "hdfs diskbalancer -out %s %s", testPath, planArg);
     runCommand(cmdLine);
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[36/50] hadoop git commit: HDFS-12473. Change hosts JSON file format.

Posted by as...@apache.org.
HDFS-12473. Change hosts JSON file format.
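
Besides switching the documented file layout to a real JSON array, the reader API changes shape: CombinedHostsFileReader.readFile() now returns a DatanodeAdminProperties[] instead of a Set, and it falls back to parsing the old one-object-per-line format for existing files. A minimal sketch of writing and re-reading a combined hosts file with the updated classes, assuming a scratch path such as /tmp/dfs.hosts.json and a surrounding method that declares throws IOException:

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
    import org.apache.hadoop.hdfs.util.CombinedHostsFileReader;
    import org.apache.hadoop.hdfs.util.CombinedHostsFileWriter;

    // Build two entries; writeFile() serializes the whole set as a single JSON array.
    Set<DatanodeAdminProperties> dns = new HashSet<>();
    DatanodeAdminProperties d1 = new DatanodeAdminProperties();
    d1.setHostName("host1");
    DatanodeAdminProperties d2 = new DatanodeAdminProperties();
    d2.setHostName("host2");
    d2.setPort(50);
    dns.add(d1);
    dns.add(d2);
    CombinedHostsFileWriter.writeFile("/tmp/dfs.hosts.json", dns);

    // readFile() returns an array; legacy per-line files are still accepted as a fallback.
    DatanodeAdminProperties[] all =
        CombinedHostsFileReader.readFile("/tmp/dfs.hosts.json");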


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/230b85d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/230b85d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/230b85d5

Branch: refs/heads/YARN-6592
Commit: 230b85d5865b7e08fb7aaeab45295b5b966011ef
Parents: 7e58b24
Author: Ming Ma <mi...@twitter.com>
Authored: Wed Sep 20 09:03:59 2017 -0700
Committer: Ming Ma <mi...@twitter.com>
Committed: Wed Sep 20 09:03:59 2017 -0700

----------------------------------------------------------------------
 .../hdfs/util/CombinedHostsFileReader.java      | 67 ++++++++++++++------
 .../hdfs/util/CombinedHostsFileWriter.java      | 23 ++++---
 .../CombinedHostFileManager.java                |  3 +-
 .../hdfs/util/TestCombinedHostsFileReader.java  | 44 ++++++++-----
 .../src/test/resources/dfs.hosts.json           | 16 +++--
 .../src/test/resources/legacy.dfs.hosts.json    |  7 ++
 6 files changed, 102 insertions(+), 58 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/230b85d5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
index 8da5655..aa8e4c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
@@ -19,58 +19,85 @@
 package org.apache.hadoop.hdfs.util;
 
 import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.databind.JsonMappingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
+
 import java.io.FileInputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
 import java.io.Reader;
+import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.Set;
-import java.util.HashSet;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
- * Reader support for JSON based datanode configuration, an alternative
+ * Reader support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
- * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * [
+ *   {"hostName": "host1"},
+ *   {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ *   {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileReader {
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(DatanodeAdminProperties.class);
-  private static final JsonFactory JSON_FACTORY = new JsonFactory();
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(CombinedHostsFileReader.class);
 
   private CombinedHostsFileReader() {
   }
 
   /**
    * Deserialize a set of DatanodeAdminProperties from a json file.
-   * @param hostsFile the input json file to read from.
+   * @param hostsFile the input json file to read from
    * @return the set of DatanodeAdminProperties
    * @throws IOException
    */
-  public static Set<DatanodeAdminProperties>
+  public static DatanodeAdminProperties[]
       readFile(final String hostsFile) throws IOException {
-    HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
+    DatanodeAdminProperties[] allDNs = new DatanodeAdminProperties[0];
+    ObjectMapper objectMapper = new ObjectMapper();
+    boolean tryOldFormat = false;
     try (Reader input =
-         new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
-      Iterator<DatanodeAdminProperties> iterator =
-          READER.readValues(JSON_FACTORY.createParser(input));
-      while (iterator.hasNext()) {
-        DatanodeAdminProperties properties = iterator.next();
-        allDNs.add(properties);
+        new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
+      allDNs = objectMapper.readValue(input, DatanodeAdminProperties[].class);
+    } catch (JsonMappingException jme) {
+      // The old format doesn't have json top-level token to enclose the array.
+      // For backward compatibility, try parsing the old format.
+      tryOldFormat = true;
+      LOG.warn("{} has invalid JSON format." +
+          "Try the old format without top-level token defined.", hostsFile);
+    }
+
+    if (tryOldFormat) {
+      ObjectReader objectReader =
+          objectMapper.readerFor(DatanodeAdminProperties.class);
+      JsonFactory jsonFactory = new JsonFactory();
+      List<DatanodeAdminProperties> all = new ArrayList<>();
+      try (Reader input =
+          new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
+        Iterator<DatanodeAdminProperties> iterator =
+            objectReader.readValues(jsonFactory.createParser(input));
+        while (iterator.hasNext()) {
+          DatanodeAdminProperties properties = iterator.next();
+          all.add(properties);
+        }
       }
+      allDNs = all.toArray(new DatanodeAdminProperties[all.size()]);
     }
     return allDNs;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/230b85d5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileWriter.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileWriter.java
index d771191..2d9bbf5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileWriter.java
@@ -32,20 +32,21 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
 /**
- * Writer support for JSON based datanode configuration, an alternative
+ * Writer support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
- * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * [
+ *   {"hostName": "host1"},
+ *   {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ *   {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileWriter {
-  private static final ObjectMapper MAPPER = new ObjectMapper();
   private CombinedHostsFileWriter() {
   }
 
@@ -57,13 +58,11 @@ public final class CombinedHostsFileWriter {
    */
   public static void writeFile(final String hostsFile,
       final Set<DatanodeAdminProperties> allDNs) throws IOException {
-    StringBuilder configs = new StringBuilder();
+    final ObjectMapper objectMapper = new ObjectMapper();
+
     try (Writer output =
        new OutputStreamWriter(new FileOutputStream(hostsFile), "UTF-8")) {
-      for (DatanodeAdminProperties datanodeAdminProperties: allDNs) {
-        configs.append(MAPPER.writeValueAsString(datanodeAdminProperties));
-      }
-      output.write(configs.toString());
+      objectMapper.writeValue(output, allDNs);
     }
   }
 }

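For comparison, a short sketch of what the old and new writer behaviour emit, again using plain Jackson with hypothetical names (HostsFileWriterDemo, and plain maps instead of DatanodeAdminProperties); the legacy concatenation is exactly the token-less layout that the reader fallback shown earlier exists to parse.

import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.StringWriter;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class HostsFileWriterDemo {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();

    Map<String, Object> host1 = new LinkedHashMap<>();
    host1.put("hostName", "host1");
    Map<String, Object> host2 = new LinkedHashMap<>();
    host2.put("hostName", "host2");
    host2.put("port", 50);
    List<Map<String, Object>> hosts = new ArrayList<>();
    hosts.add(host1);
    hosts.add(host2);

    // Legacy behaviour: serialize each element separately and concatenate the
    // results, producing JSON documents back to back with no enclosing array.
    StringBuilder legacy = new StringBuilder();
    for (Map<String, Object> host : hosts) {
      legacy.append(mapper.writeValueAsString(host));
    }
    System.out.println(legacy);
    // {"hostName":"host1"}{"hostName":"host2","port":50}

    // New behaviour: a single writeValue call serializes the whole collection
    // as one well-formed, top-level JSON array.
    StringWriter out = new StringWriter();
    mapper.writeValue(out, hosts);
    System.out.println(out);
    // [{"hostName":"host1"},{"hostName":"host2","port":50}]
  }
}
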
http://git-wip-us.apache.org/repos/asf/hadoop/blob/230b85d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
index 6f9c35e..d6a0972 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
@@ -39,7 +39,6 @@ import java.net.InetSocketAddress;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.Map;
-import java.util.Set;
 
 import com.google.common.base.Predicate;
 
@@ -212,7 +211,7 @@ public class CombinedHostFileManager extends HostConfigManager {
   }
   private void refresh(final String hostsFile) throws IOException {
     HostProperties hostProps = new HostProperties();
-    Set<DatanodeAdminProperties> all =
+    DatanodeAdminProperties[] all =
         CombinedHostsFileReader.readFile(hostsFile);
     for(DatanodeAdminProperties properties : all) {
       InetSocketAddress addr = parseEntry(hostsFile,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/230b85d5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
index b48784f..cf02180 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.util;
 import java.io.File;
 import java.io.FileWriter;
 
-import java.util.Set;
-
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
@@ -30,19 +28,21 @@ import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
 
-/*
- * Test for JSON based HostsFileReader
+/**
+ * Test for JSON based HostsFileReader.
  */
 public class TestCombinedHostsFileReader {
 
   // Using /test/build/data/tmp directory to store temporary files
-  static final String HOSTS_TEST_DIR = GenericTestUtils.getTestDir()
+  static final String HOSTSTESTDIR = GenericTestUtils.getTestDir()
       .getAbsolutePath();
-  File NEW_FILE = new File(HOSTS_TEST_DIR, "dfs.hosts.new.json");
+  private final File newFile = new File(HOSTSTESTDIR, "dfs.hosts.new.json");
 
-  static final String TEST_CACHE_DATA_DIR =
+  static final String TESTCACHEDATADIR =
       System.getProperty("test.cache.data", "build/test/cache");
-  File EXISTING_FILE = new File(TEST_CACHE_DATA_DIR, "dfs.hosts.json");
+  private final File jsonFile = new File(TESTCACHEDATADIR, "dfs.hosts.json");
+  private final File legacyFile =
+      new File(TESTCACHEDATADIR, "legacy.dfs.hosts.json");
 
   @Before
   public void setUp() throws Exception {
@@ -51,18 +51,28 @@ public class TestCombinedHostsFileReader {
   @After
   public void tearDown() throws Exception {
     // Delete test file after running tests
-    NEW_FILE.delete();
+    newFile.delete();
+
+  }
 
+  /*
+   * Load the legacy test json file
+   */
+  @Test
+  public void testLoadLegacyJsonFile() throws Exception {
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(legacyFile.getAbsolutePath());
+    assertEquals(7, all.length);
   }
 
   /*
-   * Load the existing test json file
+   * Load the test json file
    */
   @Test
   public void testLoadExistingJsonFile() throws Exception {
-    Set<DatanodeAdminProperties> all =
-        CombinedHostsFileReader.readFile(EXISTING_FILE.getAbsolutePath());
-    assertEquals(7, all.size());
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(jsonFile.getAbsolutePath());
+    assertEquals(7, all.length);
   }
 
   /*
@@ -70,11 +80,11 @@ public class TestCombinedHostsFileReader {
    */
   @Test
   public void testEmptyCombinedHostsFileReader() throws Exception {
-    FileWriter hosts = new FileWriter(NEW_FILE);
+    FileWriter hosts = new FileWriter(newFile);
     hosts.write("");
     hosts.close();
-    Set<DatanodeAdminProperties> all =
-        CombinedHostsFileReader.readFile(NEW_FILE.getAbsolutePath());
-    assertEquals(0, all.size());
+    DatanodeAdminProperties[] all =
+        CombinedHostsFileReader.readFile(newFile.getAbsolutePath());
+    assertEquals(0, all.length);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/230b85d5/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json
index 9c852e0..615b17f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json
@@ -1,7 +1,9 @@
-{"hostName": "host1"}
-{"hostName": "host2", "upgradeDomain": "ud0"}
-{"hostName": "host3", "adminState": "DECOMMISSIONED"}
-{"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"}
-{"hostName": "host5", "port": 8090}
-{"hostName": "host6", "adminState": "IN_MAINTENANCE"}
-{"hostName": "host7", "adminState": "IN_MAINTENANCE", "maintenanceExpireTimeInMS": "112233"}
+[
+  {"hostName": "host1"},
+  {"hostName": "host2", "upgradeDomain": "ud0"},
+  {"hostName": "host3", "adminState": "DECOMMISSIONED"},
+  {"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"},
+  {"hostName": "host5", "port": 8090},
+  {"hostName": "host6", "adminState": "IN_MAINTENANCE"},
+  {"hostName": "host7", "adminState": "IN_MAINTENANCE", "maintenanceExpireTimeInMS": "112233"}
+]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/230b85d5/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/legacy.dfs.hosts.json
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/legacy.dfs.hosts.json b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/legacy.dfs.hosts.json
new file mode 100644
index 0000000..9c852e0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/legacy.dfs.hosts.json
@@ -0,0 +1,7 @@
+{"hostName": "host1"}
+{"hostName": "host2", "upgradeDomain": "ud0"}
+{"hostName": "host3", "adminState": "DECOMMISSIONED"}
+{"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"}
+{"hostName": "host5", "port": 8090}
+{"hostName": "host6", "adminState": "IN_MAINTENANCE"}
+{"hostName": "host7", "adminState": "IN_MAINTENANCE", "maintenanceExpireTimeInMS": "112233"}




[07/50] hadoop git commit: HADOOP-13714. Tighten up our compatibility guidelines for Hadoop 3

Posted by as...@apache.org.
HADOOP-13714. Tighten up our compatibility guidelines for Hadoop 3


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7618fa91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7618fa91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7618fa91

Branch: refs/heads/YARN-6592
Commit: 7618fa9194b40454405f11a25bec4e2d79506912
Parents: 38c14ef
Author: Daniel Templeton <te...@apache.org>
Authored: Sat Sep 16 09:20:33 2017 +0200
Committer: Daniel Templeton <te...@apache.org>
Committed: Sat Sep 16 09:20:33 2017 +0200

----------------------------------------------------------------------
 .../src/site/markdown/Compatibility.md          | 645 +++++++++++++++----
 .../site/markdown/InterfaceClassification.md    | 227 ++++---
 2 files changed, 675 insertions(+), 197 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7618fa91/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 05b18b5..4fa8c02 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -20,109 +20,276 @@ Apache Hadoop Compatibility
 Purpose
 -------
 
-This document captures the compatibility goals of the Apache Hadoop project. The different types of compatibility between Hadoop releases that affects Hadoop developers, downstream projects, and end-users are enumerated. For each type of compatibility we:
+This document captures the compatibility goals of the Apache Hadoop project.
+The different types of compatibility between Hadoop releases that affect
+Hadoop developers, downstream projects, and end-users are enumerated. For each
+type of compatibility this document will:
 
 * describe the impact on downstream projects or end-users
 * where applicable, call out the policy adopted by the Hadoop developers when incompatible changes are permitted.
 
+All Hadoop interfaces are classified according to the intended audience and
+stability in order to maintain compatibility with previous releases. See the
+[Hadoop Interface Taxonomy](./InterfaceClassification.html) for details
+about the classifications.
+
+### Target Audience
+
+This document is intended for consumption by the Hadoop developer community.
+This document describes the lens through which changes to the Hadoop project
+should be viewed. In order for end users and third party developers to have
+confidence about cross-release compatibility, the developer community must
+ensure that development efforts adhere to these policies. It is the
+responsibility of the project committers to validate that all changes either
+maintain compatibility or are explicitly marked as incompatible.
+
+Within a component Hadoop developers are free to use Private and Limited Private
+APIs, but when using components from a different module Hadoop developers
+should follow the same guidelines as third-party developers: do not
+use Private or Limited Private (unless explicitly allowed) interfaces and
+prefer instead Stable interfaces to Evolving or Unstable interfaces where
+possible. Where not possible, the preferred solution is to expand the audience
+of the API rather than introducing or perpetuating an exception to these
+compatibility guidelines. When working within a Maven module Hadoop developers
+should observe where possible the same level of restraint with regard to
+using components located in other Maven modules.
+
+Above all, Hadoop developers must be mindful of the impact of their changes.
+Stable interfaces must not change between major releases. Evolving interfaces
+must not change between minor releases. New classes and components must be
+labeled appropriately for audience and stability. See the
+[Hadoop Interface Taxonomy](./InterfaceClassification.html) for details about
+when the various labels are appropriate. As a general rule, all new interfaces
+and APIs should have the most limited labels (e.g. Private Unstable) that will
+not inhibit the intent of the interface or API.
+
+### Notational Conventions
+
+The key words "MUST" "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
+"SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" are to be interpreted as
+described in [RFC 2119](http://tools.ietf.org/html/rfc2119).
+
+Deprecation
+-----------
+
+The Java API provides a @Deprecated annotation to mark an API element as
+flagged for removal. The standard meaning of the annotation is that the
+API element should not be used and may be removed in a later version.
+
+In all cases removing an element from an API is an incompatible
+change. In the case of [Stable](./InterfaceClassification.html#Stable) APIs,
+the change cannot be made between minor releases within the same major
+version. In addition, to allow consumers of the API time to adapt to the change,
+the API element to be removed should be marked as deprecated for a full major
+release before it is removed. For example, if a method is marked as deprecated
+in Hadoop 2.8, it cannot be removed until Hadoop 4.0.
+
+### Policy
+
+[Stable](./InterfaceClassification.html#Stable) API elements MUST NOT be removed
+until they have been marked as deprecated (through the @Deprecated annotation or
+other appropriate documentation) for a full major release. In the case that an
+API element was introduced as deprecated (to indicate that it is a temporary
+measure that is intended to be removed) the API element MAY be removed in the
+following major release. When modifying a
+[Stable](./InterfaceClassification.html#Stable) API, developers SHOULD prefer
+introducing a new method or endpoint and deprecating the existing one to making
+incompatible changes to the method or endpoint.
+
 Compatibility types
 -------------------
 
 ### Java API
 
-Hadoop interfaces and classes are annotated to describe the intended audience and stability in order to maintain compatibility with previous releases. See [Hadoop Interface Classification](./InterfaceClassification.html) for details.
-
-* InterfaceAudience: captures the intended audience, possible values are Public (for end users and external projects), LimitedPrivate (for other Hadoop components, and closely related projects like YARN, MapReduce, HBase etc.), and Private (for intra component use).
-* InterfaceStability: describes what types of interface changes are permitted. Possible values are Stable, Evolving, Unstable, and Deprecated.
+Developers SHOULD annotate all Hadoop interfaces and classes with the
+@InterfaceAudience and @InterfaceStability annotations to describe the
+intended audience and stability. Annotations may be at the package, class, or
+member variable or method level. Member variable and method annotations SHALL
+override class annotations, and class annotations SHALL override package
+annotations. A package, class, or member variable or method that is not
+annotated SHALL be interpreted as implicitly
+[Private](./InterfaceClassification.html#Private) and
+[Unstable](./InterfaceClassification.html#Unstable).
+
+* @InterfaceAudience captures the intended audience. Possible values are
+[Public](./InterfaceClassification.html#Public) (for end users and external
+projects), Limited[Private](./InterfaceClassification.html#Private) (for other
+Hadoop components, and closely related projects like YARN, MapReduce, HBase
+etc.), and [Private](./InterfaceClassification.html#Private)
+(for intra component use).
+* @InterfaceStability describes what types of interface changes are permitted. Possible values are [Stable](./InterfaceClassification.html#Stable), [Evolving](./InterfaceClassification.html#Evolving), and [Unstable](./InterfaceClassification.html#Unstable).
+* @Deprecated notes that the package, class, or member variable or method could potentially be removed in the future and should not be used.
 
 #### Use Cases
 
-* Public-Stable API compatibility is required to ensure end-user programs and downstream projects continue to work without modification.
-* LimitedPrivate-Stable API compatibility is required to allow upgrade of individual components across minor releases.
-* Private-Stable API compatibility is required for rolling upgrades.
+* [Public](./InterfaceClassification.html#Public)-[Stable](./InterfaceClassification.html#Stable) API compatibility is required to ensure end-user programs and downstream projects continue to work without modification.
+* [Public](./InterfaceClassification.html#Public)-[Evolving](./InterfaceClassification.html#Evolving) API compatibility is useful to make functionality available for consumption before it is fully baked.
+* Limited Private-[Stable](./InterfaceClassification.html#Stable) API compatibility is required to allow upgrade of individual components across minor releases.
+* [Private](./InterfaceClassification.html#Private)-[Stable](./InterfaceClassification.html#Stable) API compatibility is required for rolling upgrades.
+* [Private](./InterfaceClassification.html#Private)-[Unstable](./InterfaceClassification.html#Unstable) API compatibility allows internal components to evolve rapidly without concern for downstream consumers, and is how most interfaces should be labeled.
 
 #### Policy
 
-* Public-Stable APIs must be deprecated for at least one major release prior to their removal in a major release.
-* LimitedPrivate-Stable APIs can change across major releases, but not within a major release.
-* Private-Stable APIs can change across major releases, but not within a major release.
-* Classes not annotated are implicitly "Private". Class members not annotated inherit the annotations of the enclosing class.
-* Note: APIs generated from the proto files need to be compatible for rolling-upgrades. See the section on wire-compatibility for more details. The compatibility policies for APIs and wire-communication need to go hand-in-hand to address this.
-
-### Semantic compatibility
-
-Apache Hadoop strives to ensure that the behavior of APIs remains consistent over versions, though changes for correctness may result in changes in behavior. Tests and javadocs specify the API's behavior. The community is in the process of specifying some APIs more rigorously, and enhancing test suites to verify compliance with the specification, effectively creating a formal specification for the subset of behaviors that can be easily tested.
+The compatibility policy SHALL be determined by the relevant package, class, or
+member variable or method annotations.
+
+Note: APIs generated from the proto files MUST be compatible for rolling
+upgrades. See the section on wire protocol compatibility for more details. The
+compatibility policies for APIs and wire protocols must therefore go hand
+in hand.
+
+#### Semantic compatibility
+
+Apache Hadoop strives to ensure that the behavior of APIs remains consistent
+over versions, though changes for correctness may result in changes in
+behavior. API behavior SHALL be specified by the JavaDoc API documentation
+where present and complete. When JavaDoc API documentation is not available,
+behavior SHALL be specified by the behavior expected by the related unit tests.
+In cases with no JavaDoc API documentation or unit test coverage, the expected
+behavior is presumed to be obvious and SHOULD be assumed to be the minimum
+functionality implied by the interface naming. The community is in the process
+of specifying some APIs more rigorously and enhancing test suites to verify
+compliance with the specification, effectively creating a formal specification
+for the subset of behaviors that can be easily tested.
+
+The behavior of any API MAY be changed to fix incorrect behavior according to
+the stability of the API, with such a change to be accompanied by updating
+existing documentation and tests and/or adding new documentation or tests.
+
+#### Java Binary compatibility for end-user applications i.e. Apache Hadoop ABI
+
+Apache Hadoop revisions SHOULD retain binary compatibility such that end-user
+applications continue to work without any modifications. Minor Apache Hadoop
+revisions within the same major revision MUST retain compatibility such that
+existing MapReduce applications (e.g. end-user applications and projects such
+as Apache Pig, Apache Hive, et al), existing YARN applications (e.g.
+end-user applications and projects such as Apache Spark, Apache Tez et al),
+and applications that access HDFS directly (e.g. end-user applications and
+projects such as Apache HBase, Apache Flume, et al) work unmodified and without
+recompilation when used with any Apache Hadoop cluster within the same major
+release as the original build target.
+
+For MapReduce applications in particular, i.e. applications using the
+org.apache.hadoop.mapred and/or org.apache.hadoop.mapreduce APIs, the developer
+community SHALL support binary compatibility across major releases. The
+MapReduce APIs SHALL be supported compatibly across major releases. See
+[Compatibility for MapReduce applications between hadoop-1.x and hadoop-2.x](../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduce_Compatibility_Hadoop1_Hadoop2.html) for more details.
+
+Some applications may be affected by changes to disk layouts or other internal
+changes. See the sections that follow for policies on how incompatible
+changes to non-API interfaces are handled.
+
+### Native Dependencies
+
+Hadoop includes several native components, including compression, the
+container executor binary, and various native integrations. These native
+components introduce a set of native dependencies for Hadoop, both at compile
+time and at runtime, such as cmake, gcc, zlib, etc. This set of native
+dependencies is part of the Hadoop ABI.
 
 #### Policy
 
-The behavior of API may be changed to fix incorrect behavior, such a change to be accompanied by updating existing buggy tests or adding tests in cases there were none prior to the change.
+The minimum required versions of the native components on which Hadoop depends
+at compile time and/or runtime SHALL be considered
+[Stable](./InterfaceClassification.html#Stable). These minimum required
+versions MUST NOT increase between minor releases within a major version.
 
-### Wire compatibility
+### Wire Protocols
 
-Wire compatibility concerns data being transmitted over the wire between Hadoop processes. Hadoop uses Protocol Buffers for most RPC communication. Preserving compatibility requires prohibiting modification as described below. Non-RPC communication should be considered as well, for example using HTTP to transfer an HDFS image as part of snapshotting or transferring MapTask output. The potential communications can be categorized as follows:
+Wire compatibility concerns data being transmitted "over the wire" between
+Hadoop processes. Hadoop uses
+[Protocol Buffers](https://developers.google.com/protocol-buffers/) for most
+RPC communication. Preserving compatibility requires prohibiting modification
+as described below. Non-RPC communication should be considered as well, for
+example using HTTP to transfer an HDFS image as part of snapshotting or
+transferring MapReduce map task output. The communications can be categorized as
+follows:
 
 * Client-Server: communication between Hadoop clients and servers (e.g., the HDFS client to NameNode protocol, or the YARN client to ResourceManager protocol).
-* Client-Server (Admin): It is worth distinguishing a subset of the Client-Server protocols used solely by administrative commands (e.g., the HAAdmin protocol) as these protocols only impact administrators who can tolerate changes that end users (which use general Client-Server protocols) can not.
+* Client-Server (Admin): It is worth distinguishing a subset of the Client-Server protocols used solely by administrative commands (e.g., the HAAdmin protocol) as these protocols only impact administrators who can tolerate changes that end users (which use general Client-Server protocols) cannot.
 * Server-Server: communication between servers (e.g., the protocol between the DataNode and NameNode, or NodeManager and ResourceManager)
 
-#### Use Cases
+#### Protocol Dependencies
 
-* Client-Server compatibility is required to allow users to continue using the old clients even after upgrading the server (cluster) to a later version (or vice versa). For example, a Hadoop 2.1.0 client talking to a Hadoop 2.3.0 cluster.
-* Client-Server compatibility is also required to allow users to upgrade the client before upgrading the server (cluster). For example, a Hadoop 2.4.0 client talking to a Hadoop 2.3.0 cluster. This allows deployment of client-side bug fixes ahead of full cluster upgrades. Note that new cluster features invoked by new client APIs or shell commands will not be usable. YARN applications that attempt to use new APIs (including new fields in data structures) that have not yet been deployed to the cluster can expect link exceptions.
-* Client-Server compatibility is also required to allow upgrading individual components without upgrading others. For example, upgrade HDFS from version 2.1.0 to 2.2.0 without upgrading MapReduce.
-* Server-Server compatibility is required to allow mixed versions within an active cluster so the cluster may be upgraded without downtime in a rolling fashion.
+The components of Apache Hadoop may have dependencies that include their own
+protocols, such as Zookeeper, S3, Kerberos, etc. These protocol dependencies
+SHALL be treated as internal protocols and governed by the same policy.
 
-#### Policy
+#### Transports
+
+In addition to compatibility of the protocols themselves, maintaining
+cross-version communications requires that the transports supported also be
+stable. The most likely source of transport changes stems from secure
+transports, such as SSL. Upgrading a service from SSLv2 to SSLv3 may break
+existing SSLv2 clients. The minimum supported major version of any transports
+MUST NOT increase across minor releases within a major version.
 
-* Both Client-Server and Server-Server compatibility is preserved within a major release. (Different policies for different categories are yet to be considered.)
-* Compatibility can be broken only at a major release, though breaking compatibility even at major releases has grave consequences and should be discussed in the Hadoop community.
-* Hadoop protocols are defined in .proto (ProtocolBuffers) files. Client-Server protocols and Server-Server protocol .proto files are marked as stable. When a .proto file is marked as stable it means that changes should be made in a compatible fashion as described below:
-    * The following changes are compatible and are allowed at any time:
-        * Add an optional field, with the expectation that the code deals with the field missing due to communication with an older version of the code.
-        * Add a new rpc/method to the service
-        * Add a new optional request to a Message
-        * Rename a field
-        * Rename a .proto file
-        * Change .proto annotations that effect code generation (e.g. name of java package)
-    * The following changes are incompatible but can be considered only at a major release
-        * Change the rpc/method name
-        * Change the rpc/method parameter type or return type
-        * Remove an rpc/method
-        * Change the service name
-        * Change the name of a Message
-        * Modify a field type in an incompatible way (as defined recursively)
-        * Change an optional field to required
-        * Add or delete a required field
-        * Delete an optional field as long as the optional field has reasonable defaults to allow deletions
-    * The following changes are incompatible and hence never allowed
-        * Change a field id
-        * Reuse an old field that was previously deleted.
-        * Field numbers are cheap and changing and reusing is not a good idea.
-
-### Java Binary compatibility for end-user applications i.e. Apache Hadoop ABI
-
-As Apache Hadoop revisions are upgraded end-users reasonably expect that their applications should continue to work without any modifications. This is fulfilled as a result of supporting API compatibility, Semantic compatibility and Wire compatibility.
-
-However, Apache Hadoop is a very complex, distributed system and services a very wide variety of use-cases. In particular, Apache Hadoop MapReduce is a very, very wide API; in the sense that end-users may make wide-ranging assumptions such as layout of the local disk when their map/reduce tasks are executing, environment variables for their tasks etc. In such cases, it becomes very hard to fully specify, and support, absolute compatibility.
-
-#### Use cases
-
-* Existing MapReduce applications, including jars of existing packaged end-user applications and projects such as Apache Pig, Apache Hive, Cascading etc. should work unmodified when pointed to an upgraded Apache Hadoop cluster within a major release.
-* Existing YARN applications, including jars of existing packaged end-user applications and projects such as Apache Tez etc. should work unmodified when pointed to an upgraded Apache Hadoop cluster within a major release.
-* Existing applications which transfer data in/out of HDFS, including jars of existing packaged end-user applications and frameworks such as Apache Flume, should work unmodified when pointed to an upgraded Apache Hadoop cluster within a major release.
+Service ports are considered as part of the transport mechanism. Fixed
+service port numbers MUST be kept consistent to prevent breaking clients.
 
 #### Policy
 
-* Existing MapReduce, YARN & HDFS applications and frameworks should work unmodified within a major release i.e. Apache Hadoop ABI is supported.
-* A very minor fraction of applications maybe affected by changes to disk layouts etc., the developer community will strive to minimize these changes and will not make them within a minor version. In more egregious cases, we will consider strongly reverting these breaking changes and invalidating offending releases if necessary.
-* In particular for MapReduce applications, the developer community will try our best to support providing binary compatibility across major releases e.g. applications using org.apache.hadoop.mapred.
-* APIs are supported compatibly across hadoop-1.x and hadoop-2.x. See [Compatibility for MapReduce applications between hadoop-1.x and hadoop-2.x](../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduce_Compatibility_Hadoop1_Hadoop2.html) for more details.
+Hadoop wire protocols are defined in .proto (ProtocolBuffers) files.
+Client-Server and Server-Server protocols SHALL be classified according to the
+audience and stability classifications noted in their .proto files. In cases
+where no classifications are present, the protocols SHOULD be assumed to be
+[Private](./InterfaceClassification.html#Private) and
+[Stable](./InterfaceClassification.html#Stable).
+
+The following changes to a .proto file SHALL be considered compatible:
+
+* Add an optional field, with the expectation that the code deals with the field missing due to communication with an older version of the code
+* Add a new rpc/method to the service
+* Add a new optional request to a Message
+* Rename a field
+* Rename a .proto file
+* Change .proto annotations that affect code generation (e.g. name of java package)
+
+The following changes to a .proto file SHALL be considered incompatible:
+
+* Change an rpc/method name
+* Change an rpc/method parameter type or return type
+* Remove an rpc/method
+* Change the service name
+* Change the name of a Message
+* Modify a field type in an incompatible way (as defined recursively)
+* Change an optional field to required
+* Add or delete a required field
+* Delete an optional field as long as the optional field has reasonable defaults to allow deletions
+
+The following changes to a .proto file SHALL be considered incompatible and are never allowed:
+
+* Change a field id
+* Reuse an old field that was previously deleted.
+
+Hadoop wire protocols that are not defined via .proto files SHOULD be considered
+to be [Private](./InterfaceClassification.html#Private) and
+[Stable](./InterfaceClassification.html#Stable).
+
+In addition to the limitations imposed by being
+[Stable](./InterfaceClassification.html#Stable), Hadoop's wire protocols
+MUST also be forward compatible across minor releases within a major version
+according to the following:
+
+* Client-Server compatibility MUST be maintained so as to allow users to continue using older clients even after upgrading the server (cluster) to a later version (or vice versa). For example, a Hadoop 2.1.0 client talking to a Hadoop 2.3.0 cluster.
+* Client-Server compatibility MUST be maintained so as to allow users to upgrade the client before upgrading the server (cluster). For example, a Hadoop 2.4.0 client talking to a Hadoop 2.3.0 cluster. This allows deployment of client-side bug fixes ahead of full cluster upgrades. Note that new cluster features invoked by new client APIs or shell commands will not be usable. YARN applications that attempt to use new APIs (including new fields in data structures) that have not yet been deployed to the cluster can expect link exceptions.
+* Client-Server compatibility MUST be maintained so as to allow upgrading individual components without upgrading others. For example, upgrade HDFS from version 2.1.0 to 2.2.0 without upgrading MapReduce.
+* Server-Server compatibility MUST be maintained so as to allow mixed versions within an active cluster so the cluster may be upgraded without downtime in a rolling fashion.
+
+New transport mechanisms MUST only be introduced with minor or major version
+changes. Existing transport mechanisms MUST continue to be supported across
+minor versions within a major version. Service port numbers MUST remain
+consistent across minor version numbers within a major version.
 
 ### REST APIs
 
-REST API compatibility corresponds to both the requests (URLs) and responses to each request (content, which may contain other URLs). Hadoop REST APIs are specifically meant for stable use by clients across releases, even major ones. The following are the exposed REST APIs:
+REST API compatibility applies to the REST endpoints (URLs) and response data
+format. Hadoop REST APIs are specifically meant for stable use by clients across
+releases, even major ones. The following is a non-exhaustive list of the
+exposed REST APIs:
 
-* [WebHDFS](../hadoop-hdfs/WebHDFS.html) - Stable
+* [WebHDFS](../hadoop-hdfs/WebHDFS.html)
 * [ResourceManager](../../hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html)
 * [NodeManager](../../hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html)
 * [MR Application Master](../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapredAppMasterRest.html)
@@ -130,134 +297,390 @@ REST API compatibility corresponds to both the requests (URLs) and responses to
 * [Timeline Server v1 REST API](../../hadoop-yarn/hadoop-yarn-site/TimelineServer.html)
 * [Timeline Service v2 REST API](../../hadoop-yarn/hadoop-yarn-site/TimelineServiceV2.html)
 
+Each API has an API-specific version number. Any incompatible changes MUST
+increment the API version number.
+
+#### Policy
+
+The Hadoop REST APIs SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Evolving](./InterfaceClassification.html#Evolving). With respect to API version
+numbers, the Hadoop REST APIs SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable), i.e. no incompatible changes
+are allowed within an API version number.
+
+### Log Output
+
+The Hadoop daemons and CLIs produce log output via Log4j that is intended to
+aid administrators and developers in understanding and troubleshooting cluster
+behavior. Log messages are intended for human consumption, though automation
+use cases are also supported.
+
 #### Policy
 
-The APIs annotated stable in the text above preserve compatibility across at least one major release, and maybe deprecated by a newer version of the REST API in a major release.
+All log output SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Evolving](./InterfaceClassification.html#Evolving).
+
+### Audit Log Output
+
+Several components have audit logging systems that record system information in
+a machine readable format. Incompatible changes to that data format may break
+existing automation utilities. For the audit log, an incompatible change is any
+change that changes the format such that existing parsers no longer can parse
+the logs.
+
+#### Policy
+
+All audit log output SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable). Any change to the
+data format SHALL be considered an incompatible change.
 
 ### Metrics/JMX
 
-While the Metrics API compatibility is governed by Java API compatibility, the actual metrics exposed by Hadoop need to be compatible for users to be able to automate using them (scripts etc.). Adding additional metrics is compatible. Modifying (e.g. changing the unit or measurement) or removing existing metrics breaks compatibility. Similarly, changes to JMX MBean object names also break compatibility.
+While the Metrics API compatibility is governed by Java API compatibility, the
+Metrics data format exposed by Hadoop MUST be maintained as compatible for
+consumers of the data, e.g. for automation tasks.
 
 #### Policy
 
-Metrics should preserve compatibility within the major release.
+The data format exposed via Metrics SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable).
 
 ### File formats & Metadata
 
-User and system level data (including metadata) is stored in files of different formats. Changes to the metadata or the file formats used to store data/metadata can lead to incompatibilities between versions.
+User and system level data (including metadata) is stored in files of various
+formats. Changes to the metadata or the file formats used to store
+data/metadata can lead to incompatibilities between versions. Each class of file
+format is addressed below.
 
 #### User-level file formats
 
-Changes to formats that end-users use to store their data can prevent them from accessing the data in later releases, and hence it is highly important to keep those file-formats compatible. One can always add a "new" format improving upon an existing format. Examples of these formats include har, war, SequenceFileFormat etc.
+Changes to formats that end users use to store their data can prevent them from
+accessing the data in later releases, and hence it is important to keep these formats compatible.
+Examples of these formats include har, war, SequenceFileFormat, etc.
 
 ##### Policy
 
-* Non-forward-compatible user-file format changes are restricted to major releases. When user-file formats change, new releases are expected to read existing formats, but may write data in formats incompatible with prior releases. Also, the community shall prefer to create a new format that programs must opt in to instead of making incompatible changes to existing formats.
+User-level file formats SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable). User-level file
+format changes SHOULD be made forward compatible across major releases and MUST
+be made forward compatible within a major release. The developer community
+SHOULD prefer the creation of a new derivative file format to making
+incompatible changes to an existing file format. Such new file formats MUST be
+created as opt-in, meaning that users must be able to continue using the
+existing compatible format until and unless they explicitly opt in to using
+the new file format.
 
-#### System-internal file formats
+#### System-internal data schemas
 
-Hadoop internal data is also stored in files and again changing these formats can lead to incompatibilities. While such changes are not as devastating as the user-level file formats, a policy on when the compatibility can be broken is important.
+Hadoop internal data may also be stored in files or other data stores. Changing
+the schemas of these data stores can lead to incompatibilities.
 
 ##### MapReduce
 
 MapReduce uses formats like I-File to store MapReduce-specific data.
 
-##### Policy
+###### Policy
 
-MapReduce-internal formats like IFile maintain compatibility within a major release. Changes to these formats can cause in-flight jobs to fail and hence we should ensure newer clients can fetch shuffle-data from old servers in a compatible manner.
+All MapReduce-internal file formats, such as I-File format or the job history
+server's jhist file format, SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Stable](./InterfaceClassification.html#Stable).
 
 ##### HDFS Metadata
 
-HDFS persists metadata (the image and edit logs) in a particular format. Incompatible changes to either the format or the metadata prevent subsequent releases from reading older metadata. Such incompatible changes might require an HDFS "upgrade" to convert the metadata to make it accessible. Some changes can require more than one such "upgrades".
+HDFS persists metadata (the image and edit logs) in a private file format.
+Incompatible changes to either the format or the metadata prevent subsequent
+releases from reading older metadata. Incompatible changes MUST include a
+process by which existing metadata may be upgraded. Changes SHALL be
+allowed to require more than one upgrade. Incompatible changes MUST result in
+the metadata version number being incremented.
 
-Depending on the degree of incompatibility in the changes, the following potential scenarios can arise:
+Depending on the degree of incompatibility in the changes, the following
+potential scenarios can arise:
 
 * Automatic: The image upgrades automatically, no need for an explicit "upgrade".
 * Direct: The image is upgradable, but might require one explicit release "upgrade".
 * Indirect: The image is upgradable, but might require upgrading to intermediate release(s) first.
 * Not upgradeable: The image is not upgradeable.
 
-##### Policy
-
-* A release upgrade must allow a cluster to roll-back to the older version and its older disk format. The rollback needs to restore the original data, but not required to restore the updated data.
-* HDFS metadata changes must be upgradeable via any of the upgrade paths - automatic, direct or indirect.
-* More detailed policies based on the kind of upgrade are yet to be considered.
+HDFS data nodes store data in a private directory structure. The schema of that
+directory structure must remain stable to retain compatibility.
+
+###### Policy
+
+The HDFS metadata format SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Evolving](./InterfaceClassification.html#Evolving). Incompatible
+changes MUST include a process by which existing metadata may be upgraded. The
+upgrade process MUST allow the cluster metadata to be rolled back to the older
+version and its older disk format. The rollback MUST restore the original data
+but is not REQUIRED to restore the updated data. Any incompatible change
+to the format MUST result in the major version number of the schema being
+incremented.
+
+The data node directory format SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Evolving](./InterfaceClassification.html#Evolving).
+
+##### AWS S3A Guard Metadata
+
+For each operation in the Hadoop S3 client (s3a) that reads or modifies
+file metadata, a shadow copy of that file metadata is stored in a separate
+metadata store, which offers HDFS-like consistency for the metadata, and may
+also provide faster lookups for things like file status or directory listings.
+S3A guard tables are created with a version marker which indicates
+compatibility.
+
+###### Policy
+
+The S3A guard metadata schema SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Unstable](./InterfaceClassification.html#Unstable). Any incompatible change
+to the schema MUST result in the version number of the schema being incremented.
+
+##### YARN Resource Manager State Store
+
+The YARN resource manager stores information about the cluster state in an
+external state store for use in fail over and recovery. If the schema used for
+the state store data does not remain compatible, the resource manager will not
+be able to recover its state and will fail to start. The state store data
+schema includes a version number that indicates compatibility.
+
+###### Policy
+
+The YARN resource manager state store data schema SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Evolving](./InterfaceClassification.html#Evolving). Any incompatible change
+to the schema MUST result in the major version number of the schema being
+incremented. Any compatible change to the schema MUST result in the minor
+version number being incremented.
+
+##### YARN Node Manager State Store
+
+The YARN node manager stores information about the node state in an
+external state store for use in recovery. If the schema used for the state
+store data does not remain compatible, the node manager will not
+be able to recover its state and will fail to start. The state store data
+schema includes a version number that indicates compatibility.
+
+###### Policy
+
+The YARN node manager state store data schema SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Evolving](./InterfaceClassification.html#Evolving). Any incompatible change
+to the schema MUST result in the major version number of the schema being
+incremented. Any compatible change to the schema MUST result in the minor
+version number being incremented.
+
+##### YARN Federation State Store
+
+The YARN resource manager federation service stores information about the
+federated clusters, running applications, and routing policies in an
+external state store for use in replication and recovery. If the schema used
+for the state store data does not remain compatible, the federation service
+will fail to initialize. The state store data schema includes a version number
+that indicates compatibility.
+
+###### Policy
+
+The YARN federation service state store data schema SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Evolving](./InterfaceClassification.html#Evolving). Any incompatible change
+to the schema MUST result in the major version number of the schema being
+incremented. Any compatible change to the schema MUST result in the minor
+version number being incremented.
 
 ### Command Line Interface (CLI)
 
-The Hadoop command line programs may be used either directly via the system shell or via shell scripts. Changing the path of a command, removing or renaming command line options, the order of arguments, or the command return code and output break compatibility and may adversely affect users.
+The Hadoop command line programs may be used either directly via the system
+shell or via shell scripts. The CLIs include both the user-facing commands, such
+as the hdfs command or the yarn command, and the admin-facing commands, such as
+the scripts used to start and stop daemons.  Changing the path of a command,
+removing or renaming command line options, the order of arguments, or the
+command return codes and output break compatibility and adversely affect users.
 
 #### Policy
 
-CLI commands are to be deprecated (warning when used) for one major release before they are removed or incompatibly modified in a subsequent major release.
+All Hadoop CLI paths, usage, and output SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable).
+Note that the CLI output SHALL be considered distinct from the log output
+generated by the Hadoop CLIs. The latter SHALL be governed by the policy on log
+output. Note also that for CLI output, all changes SHALL be considered
+incompatible changes.
 
 ### Web UI
 
-Web UI, particularly the content and layout of web pages, changes could potentially interfere with attempts to screen scrape the web pages for information.
+Web UI, particularly the content and layout of web pages, changes could
+potentially interfere with attempts to screen scrape the web pages for
+information. The Hadoop Web UI pages, however, are not meant to be scraped, e.g.
+for automation purposes. Users are expected to use REST APIs to programmatically
+access cluster information.
 
 #### Policy
 
-Web pages are not meant to be scraped and hence incompatible changes to them are allowed at any time. Users are expected to use REST APIs to get any information.
+The Hadoop Web UI SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Unstable](./InterfaceClassification.html#Unstable).
 
 ### Hadoop Configuration Files
 
-Users use (1) Hadoop-defined properties to configure and provide hints to Hadoop and (2) custom properties to pass information to jobs. Hence, compatibility of config properties is two-fold:
+Users use Hadoop-defined properties to configure and provide hints to Hadoop and
+custom properties to pass information to jobs. Users are encouraged to avoid
+using custom configuration property names that conflict with the namespace of
+Hadoop-defined properties and should avoid using any prefixes used by Hadoop,
+e.g. hadoop, io, ipc, fs, net, file, ftp, s3, kfs, ha, dfs, mapred,
+mapreduce, and yarn.
+
+#### Policy
+
+Hadoop-defined properties (names and meanings) SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable). The units implied by a
+Hadoop-defined property MUST NOT change, even
+across major versions. Default values of Hadoop-defined properties SHALL be
+considered [Public](./InterfaceClassification.html#Public) and
+[Evolving](./InterfaceClassification.html#Evolving).
+
+### Log4j Configuration Files
 
-* Modifying key-names, units of values, and default values of Hadoop-defined properties.
-* Custom configuration property keys should not conflict with the namespace of Hadoop-defined properties. Typically, users should avoid using prefixes used by Hadoop: hadoop, io, ipc, fs, net, file, ftp, s3, kfs, ha, file, dfs, mapred, mapreduce, yarn.
+The log output produced by Hadoop daemons and CLIs is governed by a set of
+configuration files. These files control the minimum level of log message that
+will be output by the various components of Hadoop, as well as where and how
+those messages are stored.
 
 #### Policy
 
-* Hadoop-defined properties are to be deprecated at least for one major release before being removed. Modifying units for existing properties is not allowed.
-* The default values of Hadoop-defined properties can be changed across minor/major releases, but will remain the same across point releases within a minor release.
-* Currently, there is NO explicit policy regarding when new prefixes can be added/removed, and the list of prefixes to be avoided for custom configuration properties. However, as noted above, users should avoid using prefixes used by Hadoop: hadoop, io, ipc, fs, net, file, ftp, s3, kfs, ha, file, dfs, mapred, mapreduce, yarn.
+All Log4j configurations SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Evolving](./InterfaceClassification.html#Evolving).
 
 ### Directory Structure
 
-Source code, artifacts (source and tests), user logs, configuration files, output and job history are all stored on disk either local file system or HDFS. Changing the directory structure of these user-accessible files break compatibility, even in cases where the original path is preserved via symbolic links (if, for example, the path is accessed by a servlet that is configured to not follow symbolic links).
+Source code, artifacts (source and tests), user logs, configuration files,
+output, and job history are all stored on disk, either on the local file system or in HDFS.
+Changing the directory structure of these user-accessible files can break
+compatibility, even in cases where the original path is preserved via symbolic
+links (such as when the path is accessed by a servlet that is configured to
+not follow symbolic links).
 
 #### Policy
 
-* The layout of source code and build artifacts can change anytime, particularly so across major versions. Within a major version, the developers will attempt (no guarantees) to preserve the directory structure; however, individual files can be added/moved/deleted. The best way to ensure patches stay in sync with the code is to get them committed to the Apache source tree.
-* The directory structure of configuration files, user logs, and job history will be preserved across minor and point releases within a major release.
+The layout of source code and build artifacts SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Unstable](./InterfaceClassification.html#Unstable). Within a major version,
+the developer community SHOULD preserve the
+overall directory structure, though individual files MAY be added, moved, or
+deleted with no warning.
+
+The directory structure of configuration files, user logs, and job history SHALL
+be considered [Public](./InterfaceClassification.html#Public) and
+[Evolving](./InterfaceClassification.html#Evolving).
 
 ### Java Classpath
 
-User applications built against Hadoop might add all Hadoop jars (including Hadoop's library dependencies) to the application's classpath. Adding new dependencies or updating the version of existing dependencies may interfere with those in applications' classpaths.
+Hadoop provides several client artifacts that applications use to interact
+with the system. These artifacts typically have their own dependencies on
+common libraries. In the cases where these dependencies are exposed to
+end user applications or downstream consumers (i.e. not
+[shaded](https://stackoverflow.com/questions/13620281/what-is-the-maven-shade-plugin-used-for-and-why-would-you-want-to-relocate-java))
+changes to these dependencies can be disruptive. Developers are strongly
+encouraged to avoid exposing dependencies to clients by using techniques
+such as
+[shading](https://stackoverflow.com/questions/13620281/what-is-the-maven-shade-plugin-used-for-and-why-would-you-want-to-relocate-java).
+
+With regard to dependencies, adding a dependency is an incompatible change,
+whereas removing a dependency is a compatible change.
+
+Some user applications built against Hadoop may add all Hadoop JAR files
+(including Hadoop's library dependencies) to the application's classpath.
+Adding new dependencies or updating the versions of existing dependencies may
+interfere with those in applications' classpaths and hence their correct
+operation. Users are therefore discouraged from adopting this practice.
 
 #### Policy
 
-Currently, there is NO policy on when Hadoop's dependencies can change.
+The set of dependencies exposed by the Hadoop client artifacts SHALL be
+considered [Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable). Any dependencies that are not
+exposed to clients (either because they are shaded or only exist in non-client
+artifacts) SHALL be considered [Private](./InterfaceClassification.html#Private)
+and [Unstable](./InterfaceClassification.html#Unstable).
 
 ### Environment variables
 
-Users and related projects often utilize the exported environment variables (eg HADOOP\_CONF\_DIR), therefore removing or renaming environment variables is an incompatible change.
+Users and related projects often utilize the environment variables exported by
+Hadoop (e.g. HADOOP\_CONF\_DIR). Removing or renaming environment variables can
+therefore impact end user applications.
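
Because these variables are exported for consumption rather than guaranteed to
stay fixed, downstream tools are well served by reading them defensively. A
minimal Java sketch, assuming the application supplies its own fallback
directory (the fallback below is an application choice, not a Hadoop default):

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class ConfDirLocator {

  /**
   * Resolve the Hadoop configuration directory from the exported
   * HADOOP_CONF_DIR environment variable, falling back to a caller-supplied
   * default when the variable is absent or empty.
   */
  static Path resolveConfDir(Path fallback) {
    String confDir = System.getenv("HADOOP_CONF_DIR");
    if (confDir == null || confDir.isEmpty()) {
      return fallback;
    }
    return Paths.get(confDir);
  }

  public static void main(String[] args) {
    Path confDir = resolveConfDir(Paths.get("/etc/hadoop/conf"));
    System.out.println("Using configuration directory: " + confDir
        + (Files.isDirectory(confDir) ? "" : " (missing)"));
  }
}
```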
 
 #### Policy
 
-Currently, there is NO policy on when the environment variables can change. Developers try to limit changes to major releases.
+The environment variables consumed by Hadoop and the environment variables made
+accessible to applications through YARN SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Evolving](./InterfaceClassification.html#Evolving).
+The developer community SHOULD limit changes to major releases.
 
 ### Build artifacts
 
-Hadoop uses maven for project management and changing the artifacts can affect existing user workflows.
+Hadoop uses Maven for project management. Changes to the contents of
+generated artifacts can impact existing user applications.
 
 #### Policy
 
-* Test artifacts: The test jars generated are strictly for internal use and are not expected to be used outside of Hadoop, similar to APIs annotated @Private, @Unstable.
-* Built artifacts: The hadoop-client artifact (maven groupId:artifactId) stays compatible within a major release, while the other artifacts can change in incompatible ways.
+The contents of Hadoop test artifacts SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Unstable](./InterfaceClassification.html#Unstable). Test artifacts include
+all JAR files generated from test source code and all JAR files that include
+"tests" in the file name.
+
+The Hadoop client artifacts SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable). Client artifacts are the
+following:
+
+* hadoop-client
+* hadoop-client-api
+* hadoop-client-minicluster
+* hadoop-client-runtime
+* hadoop-hdfs-client
+* hadoop-hdfs-native-client
+* hadoop-mapreduce-client-app
+* hadoop-mapreduce-client-common
+* hadoop-mapreduce-client-core
+* hadoop-mapreduce-client-hs
+* hadoop-mapreduce-client-hs-plugins
+* hadoop-mapreduce-client-jobclient
+* hadoop-mapreduce-client-nativetask
+* hadoop-mapreduce-client-shuffle
+* hadoop-yarn-client
+
+All other build artifacts SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Unstable](./InterfaceClassification.html#Unstable).
 
 ### Hardware/Software Requirements
 
-To keep up with the latest advances in hardware, operating systems, JVMs, and other software, new Hadoop releases or some of their features might require higher versions of the same. For a specific environment, upgrading Hadoop might require upgrading other dependent software components.
+To keep up with the latest advances in hardware, operating systems, JVMs, and
+other software, new Hadoop releases may include features that require
+newer hardware, operating system releases, or JVM versions than previous
+Hadoop releases. For a specific environment, upgrading Hadoop might require
+upgrading other dependent software components.
 
 #### Policies
 
 * Hardware
     * Architecture: The community has no plans to restrict Hadoop to specific architectures, but can have family-specific optimizations.
-    * Minimum resources: While there are no guarantees on the minimum resources required by Hadoop daemons, the community attempts to not increase requirements within a minor release.
-* Operating Systems: The community will attempt to maintain the same OS requirements (OS kernel versions) within a minor release. Currently GNU/Linux and Microsoft Windows are the OSes officially supported by the community while Apache Hadoop is known to work reasonably well on other OSes such as Apple MacOSX, Solaris etc.
-* The JVM requirements will not change across point releases within the same minor release except if the JVM version under question becomes unsupported. Minor/major releases might require later versions of JVM for some/all of the supported operating systems.
-* Other software: The community tries to maintain the minimum versions of additional software required by Hadoop. For example, ssh, kerberos etc.
+    * Minimum resources: While there are no guarantees on the minimum resources required by Hadoop daemons, the developer community SHOULD avoid increasing requirements within a minor release.
+* Operating Systems: The community SHOULD maintain the same minimum OS requirements (OS kernel versions) within a minor release. Currently GNU/Linux and Microsoft Windows are the OSes officially supported by the community, while Apache Hadoop is known to work reasonably well on other OSes such as Apple MacOSX, Solaris, etc.
+* The JVM requirements SHALL NOT change across minor releases within the same major release unless the JVM version in question becomes unsupported. The JVM version requirement MAY be different for different operating systems or even operating system releases.
+* File systems supported by Hadoop, e.g. through the HDFS FileSystem API, SHOULD NOT become unsupported between minor releases within a major version unless a migration path to an alternate client implementation is available.
 
 References
 ----------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7618fa91/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
index c7309ab..451f9be 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
@@ -66,54 +66,103 @@ Hadoop uses the following kinds of audience in order of increasing/wider visibil
 
 #### Private
 
-The interface is for internal use within the project (such as HDFS or MapReduce)
-and should not be used by applications or by other projects. It is subject to
-change at anytime without notice. Most interfaces of a project are Private (also
-referred to as project-private).
+A Private interface is for internal use within the project (such as HDFS or
+MapReduce) and should not be used by applications or by other projects. Most
+interfaces of a project are Private (also referred to as project-private).
+Unless an interface is intentionally exposed for external consumption, it should
+be marked Private.
 
 #### Limited-Private
 
-The interface is used by a specified set of projects or systems (typically
-closely related projects). Other projects or systems should not use the
-interface. Changes to the interface will be communicated/negotiated with the
+A Limited-Private interface is used by a specified set of projects or systems
+(typically closely related projects). Other projects or systems should not use
+the interface. Changes to the interface will be communicated/negotiated with the
 specified projects. For example, in the Hadoop project, some interfaces are
 LimitedPrivate{HDFS, MapReduce} in that they are private to the HDFS and
 MapReduce projects.
 
 #### Public
 
-The interface is for general use by any application.
+A Public interface is for general use by any application.
+
+### Change Compatibility
+
+Changes to an API fall into two broad categories: compatible and incompatible.
+A compatible change is a change that meets the following criteria:
+
+* no existing capabilities are removed,
+* no existing capabilities are modified in a way that prevents their use by clients that were constructed to use the interface prior to the change, and
+* no capabilities are added that require changes to clients that were constructed to use the interface prior to the change.
+
+Any change that does not meet these three criteria is an incompatible change.
+Stated simply, a compatible change will not break existing clients. These
+examples are compatible changes:
+
+* adding a method to a Java class,
+* adding an optional parameter to a RESTful web service,
+* adding a tag to an XML document, or
+* making the audience annotation of an interface broader (e.g. from Private to Public) or the change compatibility annotation more restrictive (e.g. from Evolving to Stable).
+
+These examples are incompatible changes:
+
+* removing a method from a Java class,
+* adding a method to a Java interface (see the sketch after this list),
+* adding a required parameter to a RESTful web service,
+* renaming a field in a JSON document, or
+* making the audience annotation of an interface narrower (e.g. from Public to Limited-Private) or the change compatibility annotation less restrictive (e.g. from Evolving to Unstable).
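
To make the class-versus-interface items above concrete, here is a minimal,
hypothetical Java sketch; none of the types below are Hadoop classes.

```java
// IllustrativeCompatibility.java -- hypothetical types, not Hadoop APIs.

// A library class as shipped in version 1 of some library.
class RecordWriterBase {
  public void write(String record) {
    System.out.println("wrote: " + record);
  }

  // Added in version 2. Callers compiled against version 1 never invoke it,
  // so adding this method is a compatible change.
  public void flush() {
    System.out.println("flushed");
  }
}

// A library interface as shipped in version 1. Adding a new abstract method
// to it in version 2 (for example "void flush();") would be an incompatible
// change: ClientSink below would no longer compile, and an old ClientSink
// binary would throw AbstractMethodError if the new method were called on it.
interface RecordSink {
  void write(String record);
}

// A downstream implementation written against version 1 of RecordSink.
class ClientSink implements RecordSink {
  @Override
  public void write(String record) {
    System.out.println("sink: " + record);
  }
}

public class IllustrativeCompatibility {
  public static void main(String[] args) {
    new RecordWriterBase().write("a"); // unaffected by the flush() addition
    RecordSink sink = new ClientSink();
    sink.write("b");                   // breaks only if RecordSink itself grows
  }
}
```

The asymmetry comes from who supplies the new code: a method added to a class
ships with its own body, while a new abstract method on an interface must be
implemented by every existing implementor.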
 
 ### Stability
 
-Stability denotes how stable an interface is, as in when incompatible changes to
-the interface are allowed. Hadoop APIs have the following levels of stability.
+Stability denotes how stable an interface is and when compatible and
+incompatible changes to the interface are allowed. Hadoop APIs have the
+following levels of stability.
 
 #### Stable
 
-Can evolve while retaining compatibility for minor release boundaries; in other
-words, incompatible changes to APIs marked as Stable are allowed only at major
-releases (i.e. at m.0).
+A Stable interface is exposed as a preferred means of communication. A Stable
+interface is expected not to change incompatibly within a major release and
+hence serves as a safe development target. A Stable interface may evolve
+compatibly between minor releases.
+
+Incompatible changes allowed: major (X.0.0)
+Compatible changes allowed: minor (x.Y.0)
 
 #### Evolving
 
-Evolving, but incompatible changes are allowed at minor releases (i.e. m .x)
+An Evolving interface is typically exposed so that users or external code can
+make use of a feature before it has stabilized. The expectation that an
+interface should "eventually" stabilize and be promoted to Stable, however,
+is not a requirement for the interface to be labeled as Evolving.
+
+Incompatible changes are allowed for Evolving interfaces only at minor releases.
+
+Incompatible changes allowed: minor (x.Y.0)
+Compatible changes allowed: maintenance (x.y.Z)
 
 #### Unstable
 
-Incompatible changes to Unstable APIs are allowed at any time. This usually makes
-sense for only private interfaces.
+An Unstable interface is one for which no compatibility guarantees are made.
+An Unstable interface is not necessarily unstable in practice. It is typically
+exposed because a user or external code needs access to an interface that is
+not intended for general consumption. The interface is marked Unstable to
+state clearly that even though it is exposed, it is not the preferred access
+path, and no compatibility guarantees are made for it.
 
-However one may call this out for a supposedly public interface to highlight
-that it should not be used as an interface; for public interfaces, labeling it
-as Not-an-interface is probably more appropriate than "Unstable".
+Unless there is a reason to offer a compatibility guarantee on an interface,
+whether it is exposed or not, it should be labeled as Unstable.  Private
+interfaces also should be Unstable in most cases.
 
-Examples of publicly visible interfaces that are unstable
-(i.e. not-an-interface): GUI, CLIs whose output format will change.
+Incompatible changes to Unstable interfaces are allowed at any time.
+
+Incompatible changes allowed: maintenance (x.y.Z)
+Compatible changes allowed: maintenance (x.y.Z)
 
 #### Deprecated
 
-APIs that could potentially be removed in the future and should not be used.
+A Deprecated interface could potentially be removed in the future and should
+not be used.  Even so, a Deprecated interface will continue to function until
+it is removed.  When a Deprecated interface can be removed depends on whether
+it is also Stable, Evolving, or Unstable.
 
 How are the Classifications Recorded?
 -------------------------------------
@@ -121,95 +170,101 @@ How are the Classifications Recorded?
 How will the classification be recorded for Hadoop APIs?
 
 * Each interface or class will have the audience and stability recorded using
-  annotations in org.apache.hadoop.classification package.
+  annotations in the org.apache.hadoop.classification package (see the
+  sketch after this list).
 
-* The javadoc generated by the maven target javadoc:javadoc lists only the public API.
+* The javadoc generated by the maven target javadoc:javadoc lists only the
+  public API.
 
 * One can derive the audience of java classes and java interfaces by the
   audience of the package in which they are contained. Hence it is useful to
   declare the audience of each java package as public or private (along with the
   private audience variations).
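
As an illustration, a hypothetical class might record its audience and
stability as in the sketch below; ExampleClientUtil is made up, but the
annotation types are the real ones shipped in the hadoop-annotations artifact.

```java
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * ExampleClientUtil is a made-up class used only to show where the audience
 * and stability annotations go. The annotations are read by documentation
 * and audit tooling; they do not change runtime behavior.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ExampleClientUtil {

  // A helper meant only for closely related projects, declared
  // limited-private to HDFS and MapReduce and free to change at any time.
  @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
  @InterfaceStability.Unstable
  public static String internalDetail() {
    return "not for general use";
  }

  private ExampleClientUtil() {
    // utility class, no instances
  }
}
```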
 
+How will the classification be recorded for other interfaces, such as CLIs?
+
+* See the [Hadoop Compatibility](Compatibility.html) page for details.
+
 FAQ
 ---
 
 * Why aren’t the java scopes (private, package private and public) good enough?
     * Java’s scoping is not very complete. One is often forced to make a class
-      public in order for other internal components to use it. It does not have
-      friends or sub-package-private like C++.
-
-* But I can easily access a private implementation interface if it is Java public.
-  Where is the protection and control?
-    * The purpose of this is not providing absolute access control. Its purpose
-      is to communicate to users and developers. One can access private
-      implementation functions in libc; however if they change the internal
-      implementation details, your application will break and you will have
-      little sympathy from the folks who are supplying libc. If you use a
-      non-public interface you understand the risks.
-
-* Why bother declaring the stability of a private interface?
-  Aren’t private interfaces always unstable?
-    * Private interfaces are not always unstable. In the cases where they are
-      stable they capture internal properties of the system and can communicate
+      public in order for other internal components to use it. It also does not
+      have friends or sub-package-private like C++.
+
+* But I can easily access a Private interface if it is Java public. Where is the
+  protection and control?
+    * The purpose of this classification scheme is not providing absolute
+      access control. Its purpose is to communicate to users and developers.
+      One can access private implementation functions in libc; however if
+      they change the internal implementation details, the application will
+      break and one will receive little sympathy from the folks who are
+      supplying libc. When using a non-public interface, the risks are
+      understood.
+
+* Why bother declaring the stability of a Private interface? Aren’t Private
+  interfaces always Unstable?
+    * Private interfaces are not always Unstable. In the cases where they are
+      Stable they capture internal properties of the system and can communicate
       these properties to its internal users and to developers of the interface.
-        * e.g. In HDFS, NN-DN protocol is private but stable and can help
-          implement rolling upgrades. It communicates that this interface should
-          not be changed in incompatible ways even though it is private.
-        * e.g. In HDFS, FSImage stability provides more flexible rollback.
-
-* What is the harm in applications using a private interface that is stable? How
-  is it different than a public stable interface?
-    * While a private interface marked as stable is targeted to change only at
+        * e.g. In HDFS, NN-DN protocol is Private but Stable and can help
+          implement rolling upgrades. The stability annotation communicates that
+          this interface should not be changed in incompatible ways even though
+          it is Private.
+        * e.g. In HDFS, the Stable designation of the FSImage provides more
+          flexible rollback.
+
+* What is the harm in applications using a Private interface that is Stable?
+  How is it different from a Public Stable interface?
+    * While a Private interface marked as Stable is targeted to change only at
       major releases, it may break at other times if the providers of that
-      interface are willing to change the internal users of that
-      interface. Further, a public stable interface is less likely to break even
+      interface are also willing to change the internal consumers of that
+      interface. Further, a Public Stable interface is less likely to break even
       at major releases (even though it is allowed to break compatibility)
-      because the impact of the change is larger. If you use a private interface
+      because the impact of the change is larger. If you use a Private interface
       (regardless of its stability) you run the risk of incompatibility.
 
-* Why bother with Limited-private? Isn’t it giving special treatment to some projects?
-  That is not fair.
-    * First, most interfaces should be public or private; actually let us state
-      it even stronger: make it private unless you really want to expose it to
-      public for general use.
-    * Limited-private is for interfaces that are not intended for general
+* Why bother with Limited-Private? Isn’t it giving special treatment to some
+  projects? That is not fair.
+    * Most interfaces should be Public or Private. An interface should be
+      Private unless it is explicitly intended for general use.
+    * Limited-Private is for interfaces that are not intended for general
       use. They are exposed to related projects that need special hooks. Such a
-      classification has a cost to both the supplier and consumer of the limited
+      classification has a cost to both the supplier and consumer of the
       interface. Both will have to work together if ever there is a need to
       break the interface in the future; for example the supplier and the
       consumers will have to work together to get coordinated releases of their
-      respective projects. This should not be taken lightly – if you can get
-      away with private then do so; if the interface is really for general use
-      for all applications then do so. But remember that making an interface
-      public has huge responsibility. Sometimes Limited-private is just right.
-    * A good example of a limited-private interface is BlockLocations, This is a
-      fairly low-level interface that we are willing to expose to MR and perhaps
-      HBase. We are likely to change it down the road and at that time we will
-      coordinate release effort with the MR team.
-      While MR and HDFS are always released in sync today, they may
-      change down the road.
-    * If you have a limited-private interface with many projects listed then you
-      are fooling yourself. It is practically public.
-    * It might be worth declaring a special audience classification called
-      Hadoop-Private for the Hadoop family.
-
-* Lets treat all private interfaces as Hadoop-private. What is the harm in
-  projects in the Hadoop family have access to private classes?
-    * Do we want MR accessing class files that are implementation details inside
-      HDFS. There used to be many such layer violations in the code that we have
-      been cleaning up over the last few years. We don’t want such layer
-      violations to creep back in by no separating between the major components
-      like HDFS and MR.
-
-* Aren't all public interfaces stable?
-    * One may mark a public interface as evolving in its early days. Here one is
+      respective projects. This contract should not be taken lightly: use
+      Private if possible; if the interface is really for general use
+      for all applications then use Public. Always remember that making an
+      interface Public comes with a large burden of responsibility. Sometimes
+      Limited-Private is just right.
+    * A good example of a Limited-Private interface is BlockLocations. This
+      interface is a fairly low-level interface that is exposed to MapReduce
+      and HBase. The interface is likely to change down the road, and at that
+      time the release effort will have to be coordinated with the
+      MapReduce development team. While MapReduce and HDFS are always released
+      in sync today, that policy may change down the road.
+    * If you have a Limited-Private interface with many projects listed then
+      the interface is probably a good candidate to be made Public.
+
+* Let's treat all Private interfaces as Limited-Private for all of Hadoop. What
+  is the harm if projects in the Hadoop family have access to private classes?
+    * There used to be many cases in the code where one project depended on the
+      internal implementation details of another. A significant effort went
+      into cleaning up those issues. Opening up all interfaces as
+      Limited-Private for all of Hadoop would open the door to reintroducing
+      such coupling issues.
+
+* Aren't all Public interfaces Stable?
+    * One may mark a Public interface as Evolving in its early days. Here one is
       promising to make an effort to make compatible changes but may need to
       break it at minor releases.
-    * One example of a public interface that is unstable is where one is
+    * One example of a Public interface that is Unstable is where one is
       providing an implementation of a standards-body based interface that is
       still under development. For example, many companies, in an attempt to be
       first to market, have provided implementations of a new NFS protocol even
       when the protocol was not fully completed by IETF. The implementor cannot
-      evolve the interface in a fashion that causes least distruption because
+      evolve the interface in a fashion that causes least disruption because
       the stability is controlled by the standards body. Hence it is appropriate
-      to label the interface as unstable.
+      to label the interface as Unstable.




[22/50] hadoop git commit: HDFS-12480. TestNameNodeMetrics#testTransactionAndCheckpointMetrics Fails in trunk. Contributed by Hanisha Koneru

Posted by as...@apache.org.
HDFS-12480. TestNameNodeMetrics#testTransactionAndCheckpointMetrics Fails in trunk. Contributed by Hanisha Koneru


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31b58406
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31b58406
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31b58406

Branch: refs/heads/YARN-6592
Commit: 31b58406ac369716ef1665b7d60a3409117bdf9d
Parents: 595d478
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Sep 19 10:37:07 2017 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Tue Sep 19 10:37:07 2017 +0530

----------------------------------------------------------------------
 .../namenode/metrics/TestNameNodeMetrics.java     | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31b58406/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 077a5f8..db9adbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -851,22 +851,22 @@ public class TestNameNodeMetrics {
         getMetrics(NS_METRICS));
     
     assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
-    assertGauge("LastWrittenTransactionId", 3L, getMetrics(NS_METRICS));
-    assertGauge("TransactionsSinceLastCheckpoint", 3L, getMetrics(NS_METRICS));
-    assertGauge("TransactionsSinceLastLogRoll", 3L, getMetrics(NS_METRICS));
+    assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
+    assertGauge("TransactionsSinceLastCheckpoint", 4L, getMetrics(NS_METRICS));
+    assertGauge("TransactionsSinceLastLogRoll", 4L, getMetrics(NS_METRICS));
     
     fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp"));
     
     assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
-    assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
-    assertGauge("TransactionsSinceLastCheckpoint", 4L, getMetrics(NS_METRICS));
-    assertGauge("TransactionsSinceLastLogRoll", 4L, getMetrics(NS_METRICS));
+    assertGauge("LastWrittenTransactionId", 5L, getMetrics(NS_METRICS));
+    assertGauge("TransactionsSinceLastCheckpoint", 5L, getMetrics(NS_METRICS));
+    assertGauge("TransactionsSinceLastLogRoll", 5L, getMetrics(NS_METRICS));
     
     cluster.getNameNodeRpc().rollEditLog();
     
     assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
-    assertGauge("LastWrittenTransactionId", 6L, getMetrics(NS_METRICS));
-    assertGauge("TransactionsSinceLastCheckpoint", 6L, getMetrics(NS_METRICS));
+    assertGauge("LastWrittenTransactionId", 7L, getMetrics(NS_METRICS));
+    assertGauge("TransactionsSinceLastCheckpoint", 7L, getMetrics(NS_METRICS));
     assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
     
     cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
@@ -876,7 +876,7 @@ public class TestNameNodeMetrics {
     long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
         getMetrics(NS_METRICS));
     assertTrue(lastCkptTime < newLastCkptTime);
-    assertGauge("LastWrittenTransactionId", 8L, getMetrics(NS_METRICS));
+    assertGauge("LastWrittenTransactionId", 9L, getMetrics(NS_METRICS));
     assertGauge("TransactionsSinceLastCheckpoint", 1L, getMetrics(NS_METRICS));
     assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
   }




[47/50] hadoop git commit: YARN-7045. Remove FSLeafQueue#addAppSchedulable. (Contributed by Sen Zhao via Yufei Gu)

Posted by as...@apache.org.
YARN-7045. Remove FSLeafQueue#addAppSchedulable. (Contributed by Sen Zhao via Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a92ef030
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a92ef030
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a92ef030

Branch: refs/heads/YARN-6592
Commit: a92ef030a2707182e90acee644e47c8ef7e1fd8d
Parents: 263e2c6
Author: Yufei Gu <yu...@apache.org>
Authored: Thu Sep 21 12:31:07 2017 -0700
Committer: Yufei Gu <yu...@apache.org>
Committed: Thu Sep 21 12:31:07 2017 -0700

----------------------------------------------------------------------
 .../resourcemanager/scheduler/fair/FSLeafQueue.java       | 10 ----------
 .../resourcemanager/scheduler/fair/TestFSLeafQueue.java   |  6 +++---
 .../resourcemanager/scheduler/fair/TestFairScheduler.java |  6 +++---
 3 files changed, 6 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a92ef030/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 1dcfffc..89ad1e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -94,16 +94,6 @@ public class FSLeafQueue extends FSQueue {
     }
   }
   
-  // for testing
-  void addAppSchedulable(FSAppAttempt appSched) {
-    writeLock.lock();
-    try {
-      runnableApps.add(appSched);
-    } finally {
-      writeLock.unlock();
-    }
-  }
-  
   /**
    * Removes the given app from this queue.
    * @return whether or not the app was runnable

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a92ef030/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
index 2aed9bf..b6b3f7e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
@@ -89,8 +89,8 @@ public class TestFSLeafQueue extends FairSchedulerTestBase {
     FSAppAttempt app = mock(FSAppAttempt.class);
     Mockito.when(app.getDemand()).thenReturn(maxResource);
 
-    schedulable.addAppSchedulable(app);
-    schedulable.addAppSchedulable(app);
+    schedulable.addApp(app, true);
+    schedulable.addApp(app, true);
 
     schedulable.updateDemand();
 
@@ -165,7 +165,7 @@ public class TestFSLeafQueue extends FairSchedulerTestBase {
       @Override
       public void run() {
         for (int i=0; i < 500; i++) {
-          schedulable.addAppSchedulable(app);
+          schedulable.addApp(app, true);
         }
       }
     });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a92ef030/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 0ef4d7b..d5b1fcc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -5221,12 +5221,12 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     FSLeafQueue aQueue =
         new FSLeafQueue("root.queue1.a", scheduler, queue1);
     aQueue.setMaxShare(maxResource);
-    aQueue.addAppSchedulable(app1);
+    aQueue.addApp(app1, true);
 
     FSLeafQueue bQueue =
         new FSLeafQueue("root.queue1.b", scheduler, queue1);
     bQueue.setMaxShare(maxResource);
-    bQueue.addAppSchedulable(app2);
+    bQueue.addApp(app2, true);
 
     queue1.setMaxShare(maxResource);
     queue1.addChildQueue(aQueue);
@@ -5270,7 +5270,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     FSAppAttempt app = mock(FSAppAttempt.class);
     Mockito.when(app.getDemand()).thenReturn(resource);
     Mockito.when(app.getResourceUsage()).thenReturn(resource);
-    child1.addAppSchedulable(app);
+    child1.addApp(app, true);
     child1.updateDemand();
 
     String childQueueString = "{Name: root.parent.child1,"




[26/50] hadoop git commit: Revert "MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe"

Posted by as...@apache.org.
Revert "MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe"

This reverts commit b3d61304f2fa4a99526f7a60ccaac9f262083079.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea845ba5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea845ba5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea845ba5

Branch: refs/heads/YARN-6592
Commit: ea845ba58c585647c4be8d30d9b814f098e34a12
Parents: aa6e8d2
Author: Jason Lowe <jl...@apache.org>
Authored: Tue Sep 19 08:45:05 2017 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue Sep 19 08:45:05 2017 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/mapred/ShuffleHandler.java  | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea845ba5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 06a3e42..863da7e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -992,6 +992,13 @@ public class ShuffleHandler extends AuxiliaryService {
         return;
       }
 
+      // this audit log is disabled by default,
+      // to turn it on please enable this audit log
+      // on log4j.properties by uncommenting the setting
+      if (AUDITLOG.isDebugEnabled()) {
+        AUDITLOG.debug("shuffle for " + jobQ.get(0) + " mappers: " + mapIds +
+                         " reducer " + reduceQ.get(0));
+      }
       int reduceId;
       String jobId;
       try {
@@ -1176,17 +1183,6 @@ public class ShuffleHandler extends AuxiliaryService {
 
       // Now set the response headers.
       setResponseHeaders(response, keepAliveParam, contentLength);
-
-      // this audit log is disabled by default,
-      // to turn it on please enable this audit log
-      // on log4j.properties by uncommenting the setting
-      if (AUDITLOG.isDebugEnabled()) {
-        StringBuilder sb = new StringBuilder("shuffle for ").append(jobId);
-        sb.append(" mappers: ").append(mapIds);
-        sb.append(" reducer ").append(reduce);
-        sb.append(" length ").append(contentLength);
-        AUDITLOG.debug(sb.toString());
-      }
     }
 
     protected void setResponseHeaders(HttpResponse response,




[28/50] hadoop git commit: MAPREDUCE-6960. Shuffle Handler prints disk error stack traces for every read failure.

Posted by as...@apache.org.
MAPREDUCE-6960. Shuffle Handler prints disk error stack traces for every read failure.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/595d4784
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/595d4784
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/595d4784

Branch: refs/heads/YARN-6592
Commit: 595d478408104bdfe1f08efd79930e18862fafbb
Parents: 3a20deb
Author: Eric Payne <ep...@apache.org>
Authored: Tue Sep 19 10:35:15 2017 -0500
Committer: Eric Payne <ep...@apache.org>
Committed: Tue Sep 19 10:35:15 2017 -0500

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/mapred/ShuffleHandler.java    | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/595d4784/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index b7f2c6d..0eeae19 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -80,6 +80,7 @@ import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -1088,7 +1089,11 @@ public class ShuffleHandler extends AuxiliaryService {
           }
           nextMap.addListener(new ReduceMapFileCount(reduceContext));
         } catch (IOException e) {
-          LOG.error("Shuffle error :", e);
+          if (e instanceof DiskChecker.DiskErrorException) {
+            LOG.error("Shuffle error :" + e);
+          } else {
+            LOG.error("Shuffle error :", e);
+          }
           String errorMessage = getErrorMessage(e);
           sendError(reduceContext.getCtx(), errorMessage,
               INTERNAL_SERVER_ERROR);




[32/50] hadoop git commit: HDFS-12437. Fix test setup in TestLeaseRecoveryStriped.

Posted by as...@apache.org.
HDFS-12437. Fix test setup in TestLeaseRecoveryStriped.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12d9d7bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12d9d7bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12d9d7bc

Branch: refs/heads/YARN-6592
Commit: 12d9d7bc509bca82b8f40301e3dc5ca764be45eb
Parents: 51edaac
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Sep 19 16:42:20 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Sep 19 16:42:20 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java   | 156 ++++++++++++++-----
 1 file changed, 113 insertions(+), 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12d9d7bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
index 2846dbf..36ac8b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
@@ -19,8 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.commons.lang.builder.ToStringBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -28,6 +27,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.io.IOUtils;
@@ -40,34 +40,41 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Random;
+import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeoutException;
 
 public class TestLeaseRecoveryStriped {
-  public static final Log LOG = LogFactory
-      .getLog(TestLeaseRecoveryStriped.class);
+  public static final Logger LOG = LoggerFactory
+      .getLogger(TestLeaseRecoveryStriped.class);
 
   private final ErasureCodingPolicy ecPolicy =
       StripedFileTestUtil.getDefaultECPolicy();
   private final int dataBlocks = ecPolicy.getNumDataUnits();
   private final int parityBlocks = ecPolicy.getNumParityUnits();
   private final int cellSize = ecPolicy.getCellSize();
-  private final int stripSize = dataBlocks * cellSize;
-  private final int stripesPerBlock = 15;
+  private final int stripeSize = dataBlocks * cellSize;
+  private final int stripesPerBlock = 4;
   private final int blockSize = cellSize * stripesPerBlock;
   private final int blockGroupSize = blockSize * dataBlocks;
   private static final int bytesPerChecksum = 512;
 
   static {
     GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(DFSStripedOutputStream.LOG, Level.DEBUG);
+    GenericTestUtils.setLogLevel(BlockRecoveryWorker.LOG, Level.DEBUG);
+    GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.DEBUG);
   }
 
   static private final String fakeUsername = "fakeUser1";
@@ -83,7 +90,7 @@ public class TestLeaseRecoveryStriped {
   public void setup() throws IOException {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
-    conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 6000L);
+    conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 60000L);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
         false);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
@@ -104,78 +111,118 @@ public class TestLeaseRecoveryStriped {
     }
   }
 
-  private int[][][] getBlockLengthsSuite() {
+  private static class BlockLengths {
+    private final int[] blockLengths;
+    private final long safeLength;
+
+    BlockLengths(ErasureCodingPolicy policy, int[] blockLengths) {
+      this.blockLengths = blockLengths;
+      long[] longArray = Arrays.stream(blockLengths).asLongStream().toArray();
+      this.safeLength = StripedBlockUtil.getSafeLength(policy, longArray);
+    }
+
+    @Override
+    public String toString() {
+      return new ToStringBuilder(this)
+          .append("blockLengths", getBlockLengths())
+          .append("safeLength", getSafeLength())
+          .toString();
+    }
+
+    /**
+     * Length of each block in a block group.
+     */
+    public int[] getBlockLengths() {
+      return blockLengths;
+    }
+
+    /**
+     * Safe length, calculated by the block lengths.
+     */
+    public long getSafeLength() {
+      return safeLength;
+    }
+  }
+
+  private BlockLengths[] getBlockLengthsSuite() {
     final int groups = 4;
-    final int minNumCell = 3;
-    final int maxNumCell = 11;
+    final int minNumCell = 1;
+    final int maxNumCell = stripesPerBlock;
     final int minNumDelta = -4;
     final int maxNumDelta = 2;
-    int delta = 0;
-    int[][][] blkLenSuite = new int[groups][][];
+    BlockLengths[] suite = new BlockLengths[groups];
     Random random = ThreadLocalRandom.current();
-    for (int i = 0; i < blkLenSuite.length; i++) {
-      if (i == blkLenSuite.length - 1) {
-        delta = bytesPerChecksum;
-      }
-      int[][] suite = new int[2][];
-      int[] lens = new int[dataBlocks + parityBlocks];
-      long[] lenInLong = new long[lens.length];
-      for (int j = 0; j < lens.length; j++) {
+    for (int i = 0; i < groups; i++) {
+      int[] blockLengths = new int[dataBlocks + parityBlocks];
+      for (int j = 0; j < blockLengths.length; j++) {
+        // Choose a random number of cells for the block
         int numCell = random.nextInt(maxNumCell - minNumCell + 1) + minNumCell;
-        int numDelta = j < dataBlocks ?
-            random.nextInt(maxNumDelta - minNumDelta + 1) + minNumDelta : 0;
-        lens[j] = cellSize * numCell + delta * numDelta;
-        lenInLong[j] = lens[j];
+        // For data blocks, jitter the length a bit
+        int numDelta = 0;
+        if (i == groups - 1 && j < dataBlocks) {
+          numDelta = random.nextInt(maxNumDelta - minNumDelta + 1) +
+              minNumDelta;
+        }
+        blockLengths[j] = (cellSize * numCell) + (bytesPerChecksum * numDelta);
       }
-      suite[0] = lens;
-      suite[1] = new int[]{
-          (int) StripedBlockUtil.getSafeLength(ecPolicy, lenInLong)};
-      blkLenSuite[i] = suite;
+      suite[i] = new BlockLengths(ecPolicy, blockLengths);
     }
-    return blkLenSuite;
+    return suite;
   }
 
-  private final int[][][] blockLengthsSuite = getBlockLengthsSuite();
+  private final BlockLengths[] blockLengthsSuite = getBlockLengthsSuite();
 
   @Test
   public void testLeaseRecovery() throws Exception {
+    LOG.info("blockLengthsSuite: " +
+        Arrays.toString(blockLengthsSuite));
     for (int i = 0; i < blockLengthsSuite.length; i++) {
-      int[] blockLengths = blockLengthsSuite[i][0];
-      int safeLength = blockLengthsSuite[i][1][0];
+      BlockLengths blockLengths = blockLengthsSuite[i];
       try {
-        runTest(blockLengths, safeLength);
+        runTest(blockLengths.getBlockLengths(), blockLengths.getSafeLength());
       } catch (Throwable e) {
         String msg = "failed testCase at i=" + i + ", blockLengths="
-            + Arrays.toString(blockLengths) + "\n"
+            + blockLengths + "\n"
             + StringUtils.stringifyException(e);
         Assert.fail(msg);
       }
     }
   }
 
-  private void runTest(int[] blockLengths, int safeLength) throws Exception {
+  private void runTest(int[] blockLengths, long safeLength) throws Exception {
     writePartialBlocks(blockLengths);
     recoverLease();
 
     List<Long> oldGS = new ArrayList<>();
     oldGS.add(1001L);
-    StripedFileTestUtil.checkData(dfs, p, safeLength,
+    StripedFileTestUtil.checkData(dfs, p, (int)safeLength,
         new ArrayList<DatanodeInfo>(), oldGS, blockGroupSize);
     // After recovery, storages are reported by primary DN. we should verify
     // storages reported by blockReport.
     cluster.restartNameNode(true);
     cluster.waitFirstBRCompleted(0, 10000);
-    StripedFileTestUtil.checkData(dfs, p, safeLength,
+    StripedFileTestUtil.checkData(dfs, p, (int)safeLength,
         new ArrayList<DatanodeInfo>(), oldGS, blockGroupSize);
   }
 
+  /**
+   * Write a file with blocks of different lengths.
+   *
+   * This method depends on completing before the DFS socket timeout.
+   * Otherwise, the client will mark timed-out streamers as failed, and the
+   * write will fail if there are too many failed streamers.
+   *
+   * @param blockLengths lengths of blocks to write
+   * @throws Exception
+   */
   private void writePartialBlocks(int[] blockLengths) throws Exception {
     final FSDataOutputStream out = dfs.create(p);
     final DFSStripedOutputStream stripedOut = (DFSStripedOutputStream) out
         .getWrappedStream();
-    int length = (stripesPerBlock - 1) * stripSize;
+    int length = (stripesPerBlock - 1) * stripeSize;
     int[] posToKill = getPosToKill(blockLengths);
     int checkingPos = nextCheckingPos(posToKill, 0);
+    Set<Integer> stoppedStreamerIndexes = new HashSet<>();
     try {
       for (int pos = 0; pos < length; pos++) {
         out.write(StripedFileTestUtil.getByte(pos));
@@ -183,15 +230,31 @@ public class TestLeaseRecoveryStriped {
           for (int index : getIndexToStop(posToKill, pos)) {
             out.flush();
             stripedOut.enqueueAllCurrentPackets();
+            LOG.info("Stopping block stream idx {} at file offset {} block " +
+                    "length {}", index, pos, blockLengths[index]);
             StripedDataStreamer s = stripedOut.getStripedDataStreamer(index);
             waitStreamerAllAcked(s);
             waitByteSent(s, blockLengths[index]);
             stopBlockStream(s);
+            stoppedStreamerIndexes.add(index);
           }
           checkingPos = nextCheckingPos(posToKill, pos);
         }
       }
     } finally {
+      // Flush everything
+      out.flush();
+      stripedOut.enqueueAllCurrentPackets();
+      // Wait for streamers that weren't killed above to be written out
+      for (int i=0; i< blockLengths.length; i++) {
+        if (stoppedStreamerIndexes.contains(i)) {
+          continue;
+        }
+        StripedDataStreamer s = stripedOut.getStripedDataStreamer(i);
+        LOG.info("Waiting for block stream idx {} to reach length {}", i,
+            blockLengths[i]);
+        waitStreamerAllAcked(s);
+      }
       DFSTestUtil.abortStream(stripedOut);
     }
   }
@@ -210,7 +273,7 @@ public class TestLeaseRecoveryStriped {
     int[] posToKill = new int[dataBlocks + parityBlocks];
     for (int i = 0; i < dataBlocks; i++) {
       int numStripe = (blockLengths[i] - 1) / cellSize;
-      posToKill[i] = numStripe * stripSize + i * cellSize
+      posToKill[i] = numStripe * stripeSize + i * cellSize
           + blockLengths[i] % cellSize;
       if (blockLengths[i] % cellSize == 0) {
         posToKill[i] += cellSize;
@@ -220,7 +283,7 @@ public class TestLeaseRecoveryStriped {
         + parityBlocks; i++) {
       Preconditions.checkArgument(blockLengths[i] % cellSize == 0);
       int numStripe = (blockLengths[i]) / cellSize;
-      posToKill[i] = numStripe * stripSize;
+      posToKill[i] = numStripe * stripeSize;
     }
     return posToKill;
   }
@@ -243,13 +306,20 @@ public class TestLeaseRecoveryStriped {
         public Boolean get() {
           return s.bytesSent >= byteSent;
         }
-      }, 100, 3000);
+      }, 100, 30000);
     } catch (TimeoutException e) {
       throw new IOException("Timeout waiting for streamer " + s + ". Sent="
           + s.bytesSent + ", expected=" + byteSent);
     }
   }
 
+  /**
+   * Stop the block stream without immediately inducing a hard failure.
+   * Packets can continue to be queued until the streamer hits a socket timeout.
+   *
+   * @param s
+   * @throws Exception
+   */
   private void stopBlockStream(StripedDataStreamer s) throws Exception {
     IOUtils.NullOutputStream nullOutputStream = new IOUtils.NullOutputStream();
     Whitebox.setInternalState(s, "blockStream",
@@ -257,8 +327,8 @@ public class TestLeaseRecoveryStriped {
   }
 
   private void recoverLease() throws Exception {
-    final DistributedFileSystem dfs2 = (DistributedFileSystem) getFSAsAnotherUser(
-        conf);
+    final DistributedFileSystem dfs2 =
+        (DistributedFileSystem) getFSAsAnotherUser(conf);
     try {
       GenericTestUtils.waitFor(new Supplier<Boolean>() {
         @Override




[19/50] hadoop git commit: HADOOP-14835. mvn site build throws SAX errors. Contributed by Andrew Wang and Sean Mackrory.

Posted by as...@apache.org.
HADOOP-14835. mvn site build throws SAX errors. Contributed by Andrew Wang and Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cf3540f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cf3540f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cf3540f

Branch: refs/heads/YARN-6592
Commit: 3cf3540f19b5fd1a174690db9f1b7be2977d96ba
Parents: b3d6130
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Sep 18 15:13:42 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Sep 18 15:13:42 2017 -0700

----------------------------------------------------------------------
 BUILDING.txt                                       |  2 ++
 dev-support/bin/create-release                     |  1 +
 .../hadoop-mapreduce-client/pom.xml                | 17 ++++++++++++++++-
 hadoop-project-dist/pom.xml                        | 17 ++++++++++++++++-
 hadoop-project/pom.xml                             |  2 ++
 hadoop-yarn-project/hadoop-yarn/pom.xml            | 17 ++++++++++++++++-
 6 files changed, 53 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/BUILDING.txt
----------------------------------------------------------------------
diff --git a/BUILDING.txt b/BUILDING.txt
index 14deec8..47aaab4 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -308,6 +308,8 @@ Create a local staging version of the website (in /tmp/hadoop-site)
 
   $ mvn clean site -Preleasedocs; mvn site:stage -DstagingDirectory=/tmp/hadoop-site
 
+Note that the site needs to be built in a second pass after other artifacts.
+
 ----------------------------------------------------------------------------------
 Installing Hadoop
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/dev-support/bin/create-release
----------------------------------------------------------------------
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index b22e90b..b98c058 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -564,6 +564,7 @@ function makearelease
     "${MVN}" "${MVN_ARGS[@]}" install \
       site site:stage \
       -DskipTests \
+      -DskipShade \
       -Pdist,src \
       "${DOCFLAGS}"
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
index aa7c7b1..274a821 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
@@ -196,6 +196,13 @@
         <jdiff.stability>-unstable</jdiff.stability>
         <jdiff.javadoc.maxmemory>512m</jdiff.javadoc.maxmemory>
       </properties>
+      <dependencies>
+        <dependency>
+          <groupId>xerces</groupId>
+          <artifactId>xercesImpl</artifactId>
+          <version>${xerces.jdiff.version}</version>
+        </dependency>
+      </dependencies>
       <build>
         <plugins>
           <plugin>
@@ -238,6 +245,14 @@
                       <outputDirectory>${project.build.directory}</outputDirectory>
                       <destFileName>hadoop-annotations.jar</destFileName>
                     </artifactItem>
+                    <artifactItem>
+                      <groupId>xerces</groupId>
+                      <artifactId>xercesImpl</artifactId>
+                      <version>${xerces.jdiff.version}</version>
+                      <overWrite>false</overWrite>
+                      <outputDirectory>${project.build.directory}</outputDirectory>
+                      <destFileName>xerces.jar</destFileName>
+                    </artifactItem>
                   </artifactItems>
                 </configuration>
               </execution>
@@ -275,7 +290,7 @@
                            sourceFiles="${dev-support.relative.dir}/jdiff/Null.java"
                            maxmemory="${jdiff.javadoc.maxmemory}">
                     <doclet name="org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet"
-                            path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar">
+                            path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar:${project.build.directory}/xerces.jar">
                       <param name="-oldapi" value="${project.name} ${jdiff.stable.api}"/>
                       <param name="-newapi" value="${project.name} ${project.version}"/>
                       <param name="-oldapidir" value="${basedir}/${dev-support.relative.dir}/jdiff"/>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index addc2a5..8815dd4 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -152,6 +152,13 @@
         <jdiff.compatibility></jdiff.compatibility>
         <jdiff.javadoc.maxmemory>512m</jdiff.javadoc.maxmemory>
       </properties>
+      <dependencies>
+        <dependency>
+          <groupId>xerces</groupId>
+          <artifactId>xercesImpl</artifactId>
+          <version>${xerces.jdiff.version}</version>
+        </dependency>
+      </dependencies>
       <build>
         <plugins>
           <plugin>
@@ -194,6 +201,14 @@
                       <outputDirectory>${project.build.directory}</outputDirectory>
                       <destFileName>hadoop-annotations.jar</destFileName>
                     </artifactItem>
+                    <artifactItem>
+                      <groupId>xerces</groupId>
+                      <artifactId>xercesImpl</artifactId>
+                      <version>${xerces.jdiff.version}</version>
+                      <overWrite>false</overWrite>
+                      <outputDirectory>${project.build.directory}</outputDirectory>
+                      <destFileName>xerces.jar</destFileName>
+                    </artifactItem>
                   </artifactItems>
                 </configuration>
               </execution>
@@ -259,7 +274,7 @@
                              sourceFiles="${basedir}/dev-support/jdiff/Null.java"
                              maxmemory="${jdiff.javadoc.maxmemory}">
                       <doclet name="org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet"
-                              path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar">
+                              path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar:${project.build.directory}/xerces.jar">
                         <param name="-oldapi" value="${project.name} ${jdiff.stable.api}"/>
                         <param name="-newapi" value="${project.name} ${project.version}"/>
                         <param name="-oldapidir" value="${basedir}/dev-support/jdiff"/>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index b8fb961..a698126 100755
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -45,6 +45,8 @@
     <!-- These 2 versions are defined here because they are used -->
     <!-- JDIFF generation from embedded ant in the antrun plugin -->
     <jdiff.version>1.0.9</jdiff.version>
+    <!-- Version number for xerces used by JDiff -->
+    <xerces.jdiff.version>2.11.0</xerces.jdiff.version>
 
     <kafka.version>0.8.2.1</kafka.version>
     <hbase.version>1.2.6</hbase.version>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/hadoop-yarn-project/hadoop-yarn/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml
index be435d5..288f4bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -113,6 +113,13 @@
         <jdiff.compatibility></jdiff.compatibility>
         <jdiff.javadoc.maxmemory>512m</jdiff.javadoc.maxmemory>
       </properties>
+      <dependencies>
+        <dependency>
+          <groupId>xerces</groupId>
+          <artifactId>xercesImpl</artifactId>
+          <version>${xerces.jdiff.version}</version>
+        </dependency>
+      </dependencies>
       <build>
         <plugins>
           <plugin>
@@ -155,6 +162,14 @@
                       <outputDirectory>${project.build.directory}</outputDirectory>
                       <destFileName>hadoop-annotations.jar</destFileName>
                     </artifactItem>
+                    <artifactItem>
+                      <groupId>xerces</groupId>
+                      <artifactId>xercesImpl</artifactId>
+                      <version>${xerces.jdiff.version}</version>
+                      <overWrite>false</overWrite>
+                      <outputDirectory>${project.build.directory}</outputDirectory>
+                      <destFileName>xerces.jar</destFileName>
+                    </artifactItem>
                   </artifactItems>
                 </configuration>
               </execution>
@@ -193,7 +208,7 @@
                       sourceFiles="${dev-support.relative.dir}/jdiff/Null.java"
                              maxmemory="${jdiff.javadoc.maxmemory}">
                       <doclet name="org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet"
-                              path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar">
+                              path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar:${project.build.directory}/xerces.jar">
                         <param name="-oldapi" value="${project.name} ${jdiff.stable.api}"/>
                         <param name="-newapi" value="${project.name} ${project.version}"/>
                         <param name="-oldapidir" value="${basedir}/${dev-support.relative.dir}/jdiff"/>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[29/50] hadoop git commit: HDFS-12449. TestReconstructStripedFile.testNNSendsErasureCodingTasks randomly cannot finish in 60s. (SammiChen via lei)

Posted by as...@apache.org.
HDFS-12449. TestReconstructStripedFile.testNNSendsErasureCodingTasks randomly cannot finish in 60s. (SammiChen via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7bbeacb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7bbeacb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7bbeacb7

Branch: refs/heads/YARN-6592
Commit: 7bbeacb75e93261dbda0e8efcde510e5fcf83efb
Parents: fda1221
Author: Lei Xu <le...@apache.org>
Authored: Tue Sep 19 11:50:01 2017 -0700
Committer: Lei Xu <le...@apache.org>
Committed: Tue Sep 19 11:50:01 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bbeacb7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 72b1412..713a10b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -456,8 +456,8 @@ public class TestReconstructStripedFile {
     ErasureCodingPolicy policy = StripedFileTestUtil.getDefaultECPolicy();
     fs.getClient().setErasureCodingPolicy("/", policy.getName());
 
-    final int fileLen = cellSize * ecPolicy.getNumDataUnits() * 2;
-    for (int i = 0; i < 100; i++) {
+    final int fileLen = cellSize * ecPolicy.getNumDataUnits();
+    for (int i = 0; i < 50; i++) {
       writeFile(fs, "/ec-file-" + i, fileLen);
     }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[39/50] hadoop git commit: Revert "MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergery Novák."

Posted by as...@apache.org.
Revert "MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergery Novák."

This reverts commit 2018538fdba1a95a6556187569e872fce7f9e1c3.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53047f93
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53047f93
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53047f93

Branch: refs/heads/YARN-6592
Commit: 53047f934e3f81237ac9f0d75dddfc44862ef2d9
Parents: a12f09b
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Sep 21 11:16:05 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Sep 21 11:16:05 2017 +0900

----------------------------------------------------------------------
 .../java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java | 7 +++----
 .../main/java/org/apache/hadoop/examples/DBCountPageView.java | 7 +++----
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java | 7 ++++---
 .../src/main/java/org/apache/hadoop/examples/pi/DistSum.java  | 6 +++---
 .../java/org/apache/hadoop/examples/terasort/TeraGen.java     | 6 +++---
 .../org/apache/hadoop/examples/terasort/TeraOutputFormat.java | 7 +++----
 .../org/apache/hadoop/examples/terasort/TeraScheduler.java    | 7 +++----
 .../java/org/apache/hadoop/examples/terasort/TeraSort.java    | 6 +++---
 .../org/apache/hadoop/examples/terasort/TestTeraSort.java     | 6 +++---
 9 files changed, 28 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53047f93/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
index da4ec79..7e98d7d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
@@ -29,6 +29,8 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -49,8 +51,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Charsets;
 
@@ -83,8 +83,7 @@ public class BaileyBorweinPlouffe extends Configured implements Tool {
   private static final String DIGIT_SIZE_PROPERTY = NAME + ".digit.size";
   private static final String DIGIT_PARTS_PROPERTY = NAME + ".digit.parts";
 
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BaileyBorweinPlouffe.class);
+  private static final Log LOG = LogFactory.getLog(BaileyBorweinPlouffe.class);
 
   /** Mapper class computing digits of Pi. */
   public static class BbpMapper extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53047f93/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
index 7b73820..8dec39d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
@@ -29,6 +29,8 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Random;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.LongWritable;
@@ -47,8 +49,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.hsqldb.server.Server;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * This is a demonstrative program, which uses DBInputFormat for reading
@@ -77,8 +77,7 @@ import org.slf4j.LoggerFactory;
  */
 public class DBCountPageView extends Configured implements Tool {
 
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DBCountPageView.class);
+  private static final Log LOG = LogFactory.getLog(DBCountPageView.class);
   
   private Connection connection;
   private boolean initialized = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53047f93/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
index eef4461..0401a25 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.examples.dancing;
 
 import java.util.*;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 
 /**
  * A generic solver for tile laying problems using Knuth's dancing link
@@ -35,7 +35,8 @@ import org.slf4j.LoggerFactory;
  * The type parameter ColumnName is the class of application's column names.
  */
 public class DancingLinks<ColumnName> {
-  private static final Logger LOG = LoggerFactory.getLogger(DancingLinks.class);
+  private static final Log LOG = 
+    LogFactory.getLog(DancingLinks.class.getName());
   
   /**
    * A cell in the table with up/down and left/right links that form doubly

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53047f93/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
index ffe63fe..99f7c24 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
@@ -28,6 +28,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Callable;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.examples.pi.math.Summation;
@@ -53,8 +55,6 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The main class for computing sums using map/reduce jobs.
@@ -66,7 +66,7 @@ import org.slf4j.LoggerFactory;
  * a mix-type job may be executed on either side.
  */
 public final class DistSum extends Configured implements Tool {
-  private static final Logger LOG = LoggerFactory.getLogger(DistSum.class);
+  private static final Log LOG = LogFactory.getLog(DistSum.class);
 
   private static final String NAME = DistSum.class.getSimpleName();
   private static final String N_PARTS = "mapreduce.pi." + NAME + ".nParts";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53047f93/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
index ef20c4c..53bbdc4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
@@ -25,6 +25,8 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.zip.Checksum;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
@@ -47,8 +49,6 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.PureJavaCrc32;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Generate the official GraySort input data set.
@@ -66,7 +66,7 @@ import org.slf4j.LoggerFactory;
  * <b>bin/hadoop jar hadoop-*-examples.jar teragen 10000000000 in-dir</b>
  */
 public class TeraGen extends Configured implements Tool {
-  private static final Logger LOG = LoggerFactory.getLogger(TeraGen.class);
+  private static final Log LOG = LogFactory.getLog(TeraGen.class);
 
   public enum Counters {CHECKSUM}
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53047f93/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
index e0ce36c..96580b1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.examples.terasort;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -36,15 +38,12 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.mapreduce.security.TokenCache;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * An output format that writes the key and value appended together.
  */
 public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TeraOutputFormat.class);
+  private static final Log LOG = LogFactory.getLog(TeraOutputFormat.class);
   private OutputCommitter committer = null;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53047f93/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java
index 7a9e44b..3e12a3d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java
@@ -21,18 +21,17 @@ package org.apache.hadoop.examples.terasort;
 import java.io.*;
 import java.util.*;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
 import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Charsets;
 
 class TeraScheduler {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TeraScheduler.class);
+  private static final Log LOG = LogFactory.getLog(TeraScheduler.class);
   private Split[] splits;
   private List<Host> hosts = new ArrayList<Host>();
   private int slotsPerHost;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53047f93/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
index 8b698e3..040d13f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
@@ -23,6 +23,8 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.net.URI;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -36,8 +38,6 @@ import org.apache.hadoop.mapreduce.Partitioner;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Generates the sampled split points, launches the job, and waits for it to
@@ -47,7 +47,7 @@ import org.slf4j.LoggerFactory;
  * <b>bin/hadoop jar hadoop-*-examples.jar terasort in-dir out-dir</b>
  */
 public class TeraSort extends Configured implements Tool {
-  private static final Logger LOG = LoggerFactory.getLogger(TeraSort.class);
+  private static final Log LOG = LogFactory.getLog(TeraSort.class);
 
   /**
    * A partitioner that splits text keys into roughly equal partitions

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53047f93/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
index b301659..b835a3b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.examples.terasort;
 import java.io.File;
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.FileAlreadyExistsException;
@@ -27,14 +29,12 @@ import org.apache.hadoop.mapred.HadoopTestCase;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
 public class TestTeraSort extends HadoopTestCase {
-  private static final Logger LOG = LoggerFactory.getLogger(TestTeraSort.class);
+  private static Log LOG = LogFactory.getLog(TestTeraSort.class);
   
   public TestTeraSort()
       throws IOException {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[24/50] hadoop git commit: HDFS-12479. Some misuses of lock in DFSStripedOutputStream. Contributed by Huafeng Wang

Posted by as...@apache.org.
HDFS-12479. Some misuses of lock in DFSStripedOutputStream. Contributed by Huafeng Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dba7a7dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dba7a7dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dba7a7dd

Branch: refs/heads/YARN-6592
Commit: dba7a7dd9d70adfab36a78eb55059c54e553a5cb
Parents: 2018538
Author: Kai Zheng <ka...@intel.com>
Authored: Tue Sep 19 17:45:41 2017 +0800
Committer: Kai Zheng <ka...@intel.com>
Committed: Tue Sep 19 17:45:41 2017 +0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dba7a7dd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 44db3a6..66eec7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -63,6 +63,7 @@ import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutorService;
@@ -85,11 +86,10 @@ public class DFSStripedOutputStream extends DFSOutputStream
     private final List<BlockingQueue<T>> queues;
 
     MultipleBlockingQueue(int numQueue, int queueSize) {
-      List<BlockingQueue<T>> list = new ArrayList<>(numQueue);
+      queues = new ArrayList<>(numQueue);
       for (int i = 0; i < numQueue; i++) {
-        list.add(new LinkedBlockingQueue<T>(queueSize));
+        queues.add(new LinkedBlockingQueue<T>(queueSize));
       }
-      queues = Collections.synchronizedList(list);
     }
 
     void offer(int i, T object) {
@@ -156,8 +156,7 @@ public class DFSStripedOutputStream extends DFSOutputStream
       followingBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
       endBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
       newBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
-      updateStreamerMap = Collections.synchronizedMap(
-          new HashMap<StripedDataStreamer, Boolean>(numAllBlocks));
+      updateStreamerMap = new ConcurrentHashMap<>(numAllBlocks);
       streamerUpdateResult = new MultipleBlockingQueue<>(numAllBlocks, 1);
     }
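
(Illustration only, not part of the commit above.) A minimal, hypothetical Java sketch of the two points behind HDFS-12479, with simplified class and field names: a final list that is only populated in the constructor and afterwards read by fixed index does not need a Collections.synchronizedList wrapper, and a map updated concurrently by the per-block streamers can simply be a ConcurrentHashMap instead of Collections.synchronizedMap.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;

class CoordinatorSketch<T> {
  // Effectively immutable after construction: safe to read from multiple threads
  // without a Collections.synchronizedList wrapper.
  private final List<BlockingQueue<T>> queues;

  // Updated concurrently by per-block streamer threads: ConcurrentHashMap gives
  // thread safety without an external synchronized wrapper.
  private final Map<Integer, Boolean> updateResults = new ConcurrentHashMap<>();

  CoordinatorSketch(int numQueues, int queueSize) {
    queues = new ArrayList<>(numQueues);
    for (int i = 0; i < numQueues; i++) {
      queues.add(new LinkedBlockingQueue<>(queueSize));
    }
  }

  void offer(int i, T item) {
    queues.get(i).offer(item);          // the BlockingQueue itself is thread-safe
  }

  void recordResult(int streamerIndex, boolean ok) {
    updateResults.put(streamerIndex, ok);
  }
}

The safe publication guaranteed by the final field is what makes the unsynchronized ArrayList reads safe here, since the list is never structurally modified after construction.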
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[06/50] hadoop git commit: YARN-7149. Cross-queue preemption sometimes starves an underserved queue. (Eric Payne via wangda)

Posted by as...@apache.org.
YARN-7149. Cross-queue preemption sometimes starves an underserved queue. (Eric Payne via wangda)

Change-Id: Ib269991dbebce160378e8372ee6d24849c4a5ed6
(cherry picked from commit 3dfa937a1fadfc62947755872515f549b3b15e6a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38c14ef8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38c14ef8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38c14ef8

Branch: refs/heads/YARN-6592
Commit: 38c14ef8d8a094a7101917eb77d90f5e62324f61
Parents: 958e8c0
Author: Wangda Tan <wa...@apache.org>
Authored: Fri Sep 15 21:25:21 2017 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Fri Sep 15 21:29:39 2017 -0700

----------------------------------------------------------------------
 .../scheduler/capacity/UsersManager.java        |  4 +-
 .../capacity/TestContainerAllocation.java       | 50 ++++++++++++++++++++
 .../scheduler/capacity/TestLeafQueue.java       |  8 ++--
 3 files changed, 57 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c14ef8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
index 5f7d185..33f30b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
@@ -731,7 +731,9 @@ public class UsersManager implements AbstractUsersManager {
      * should be higher than queue-hard-limit * ulMin
      */
     float usersSummedByWeight = activeUsersTimesWeights;
-    Resource resourceUsed = totalResUsageForActiveUsers.getUsed(nodePartition);
+    Resource resourceUsed = Resources.add(
+                            totalResUsageForActiveUsers.getUsed(nodePartition),
+                            required);
 
     // For non-activeUser calculation, consider all users count.
     if (!activeUser) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c14ef8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
index dd6b25b..906febf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Container;
@@ -887,4 +888,53 @@ public class TestContainerAllocation {
 
     rm1.close();
   }
+
+
+
+  @Test(timeout = 60000)
+  public void testUserLimitAllocationMultipleContainers() throws Exception {
+    CapacitySchedulerConfiguration newConf =
+        (CapacitySchedulerConfiguration) TestUtils
+            .getConfigurationWithMultipleQueues(conf);
+    newConf.setUserLimit("root.c", 50);
+    MockRM rm1 = new MockRM(newConf);
+
+    rm1.getRMContext().setNodeLabelManager(mgr);
+    rm1.start();
+    MockNM nm1 = rm1.registerNode("h1:1234", 1000 * GB);
+
+    // launch app from 1st user to queue C, AM container should be launched in nm1
+    RMApp app1 = rm1.submitApp(2 * GB, "app", "user1", null, "c");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
+
+    // launch app from 2nd user to queue C, AM container should be launched in nm1
+    RMApp app2 = rm1.submitApp(2 * GB, "app", "user2", null, "c");
+    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
+
+    // Each application asks 1000 * 5GB containers
+    am1.allocate("*", 5 * GB, 1000, null);
+    am1.allocate("h1", 5 * GB, 1000, null);
+    am1.allocate(NetworkTopology.DEFAULT_RACK, 5 * GB, 1000, null);
+
+    // Each application asks 1000 * 5GB containers
+    am2.allocate("*", 5 * GB, 1000, null);
+    am2.allocate("h1", 5 * GB, 1000, null);
+    am2.allocate(NetworkTopology.DEFAULT_RACK, 5 * GB, 1000, null);
+
+    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
+    RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
+
+    FiCaSchedulerApp schedulerApp1 =
+        cs.getApplicationAttempt(am1.getApplicationAttemptId());
+    FiCaSchedulerApp schedulerApp2 =
+        cs.getApplicationAttempt(am2.getApplicationAttemptId());
+
+    // container will be allocated to am1
+    // App1 will get 2 container allocated (plus AM container)
+    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
+    Assert.assertEquals(101, schedulerApp1.getLiveContainers().size());
+    Assert.assertEquals(100, schedulerApp2.getLiveContainers().size());
+
+    rm1.close();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c14ef8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index d45f756..a32352b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -1252,7 +1252,7 @@ public class TestLeafQueue {
     //app4 is user 0
     //maxqueue 16G, userlimit 7G, used 8G, headroom 5G
     //(8G used is 6G from this test case - app4, 2 from last test case, app_1)
-    assertEquals(0*GB, app_4.getHeadroom().getMemorySize());
+    assertEquals(1*GB, app_4.getHeadroom().getMemorySize());
   }
 
   @Test
@@ -1436,7 +1436,7 @@ public class TestLeafQueue {
     assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize());
     assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize());
     // TODO, fix headroom in the future patch
-    assertEquals(0*GB, app_0.getHeadroom().getMemorySize());
+    assertEquals(1*GB, app_0.getHeadroom().getMemorySize());
       // User limit = 2G, 2 in use
     assertEquals(0*GB, app_1.getHeadroom().getMemorySize());
       // the application is not yet active
@@ -1449,8 +1449,8 @@ public class TestLeafQueue {
     assertEquals(3*GB, a.getUsedResources().getMemorySize());
     assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize());
     assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize());
-    assertEquals(0*GB, app_0.getHeadroom().getMemorySize()); // 4G - 3G
-    assertEquals(0*GB, app_1.getHeadroom().getMemorySize()); // 4G - 3G
+    assertEquals(1*GB, app_0.getHeadroom().getMemorySize()); // 4G - 3G
+    assertEquals(1*GB, app_1.getHeadroom().getMemorySize()); // 4G - 3G
     
     // Submit requests for app_1 and set max-cap
     a.setMaxCapacity(.1f);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[46/50] hadoop git commit: YARN-6991. "Kill application" button does not show error if other user tries to kill the application for secure cluster. (Suma Shivaprasad via wangda)

Posted by as...@apache.org.
YARN-6991. "Kill application" button does not show error if other user tries to kill the application for secure cluster. (Suma Shivaprasad via wangda)

Change-Id: I7e7894b24609709f89064ee5882f055dbb09080b


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/263e2c69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/263e2c69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/263e2c69

Branch: refs/heads/YARN-6592
Commit: 263e2c692a4b0013766cd1f6b6d7ed674b2b1040
Parents: b9e423f
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Sep 21 12:00:53 2017 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Thu Sep 21 12:00:53 2017 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/server/webapp/AppBlock.java     | 44 +++++++++++---------
 1 file changed, 24 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/263e2c69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index 08e75ac..b429b5d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -137,6 +137,30 @@ public class AppBlock extends HtmlBlock {
 
     setTitle(join("Application ", aid));
 
+    //Validate if able to read application attempts
+    // which should also validate if kill is allowed for the user based on ACLs
+
+    Collection<ApplicationAttemptReport> attempts;
+    try {
+      final GetApplicationAttemptsRequest request =
+          GetApplicationAttemptsRequest.newInstance(appID);
+      attempts = callerUGI.doAs(
+          new PrivilegedExceptionAction<Collection<
+              ApplicationAttemptReport>>() {
+            @Override
+            public Collection<ApplicationAttemptReport> run() throws Exception {
+              return getApplicationAttemptsReport(request);
+            }
+          });
+    } catch (Exception e) {
+      String message =
+          "Failed to read the attempts of the application " + appID + ".";
+      LOG.error(message, e);
+      html.p().__(message).__();
+      return;
+    }
+
+
     // YARN-6890. for secured cluster allow anonymous UI access, application kill
     // shouldn't be there.
     boolean unsecuredUIForSecuredCluster = UserGroupInformation.isSecurityEnabled()
@@ -183,26 +207,6 @@ public class AppBlock extends HtmlBlock {
 
     generateOverviewTable(app, schedulerPath, webUiType, appReport);
 
-    Collection<ApplicationAttemptReport> attempts;
-    try {
-      final GetApplicationAttemptsRequest request =
-          GetApplicationAttemptsRequest.newInstance(appID);
-      attempts = callerUGI.doAs(
-          new PrivilegedExceptionAction<Collection<
-              ApplicationAttemptReport>>() {
-            @Override
-            public Collection<ApplicationAttemptReport> run() throws Exception {
-              return getApplicationAttemptsReport(request);
-            }
-          });
-    } catch (Exception e) {
-      String message =
-          "Failed to read the attempts of the application " + appID + ".";
-      LOG.error(message, e);
-      html.p().__(message).__();
-      return;
-    }
-
     createApplicationMetricsTable(html);
 
     html.__(InfoBlock.class);
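
(Hypothetical sketch with simplified types, not the AppBlock source.) The point of moving the block above is ordering: the ACL-sensitive read of the application attempts now runs under the caller's UGI before any action is rendered, so a user without permission gets the error paragraph instead of a kill button that fails silently. Roughly:

import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.Collections;

public class AppBlockSketch {
  /** Stand-in for UserGroupInformation; only the doAs shape is assumed here. */
  interface CallerUgi {
    <T> T doAs(PrivilegedExceptionAction<T> action) throws Exception;
  }

  static void render(CallerUgi callerUgi, StringBuilder html, String appId) {
    Collection<String> attempts;
    try {
      // ACL-checked read done first; throws if the caller may not view the app.
      attempts = callerUgi.doAs(() -> fetchAttempts(appId));
    } catch (Exception e) {
      html.append("Failed to read the attempts of the application ").append(appId).append(".");
      return;                                  // bail out before any action is rendered
    }
    html.append("[kill application button]");  // only reached when the read succeeded
    html.append(" attempts: ").append(attempts.size());
  }

  private static Collection<String> fetchAttempts(String appId) {
    return Collections.singletonList(appId + "_000001");   // placeholder data
  }
}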


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[27/50] hadoop git commit: MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe

Posted by as...@apache.org.
MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a20debd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a20debd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a20debd

Branch: refs/heads/YARN-6592
Commit: 3a20debddeac69596ceb5b36f8413529ea8570e6
Parents: ea845ba
Author: Jason Lowe <jl...@apache.org>
Authored: Tue Sep 19 09:13:17 2017 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue Sep 19 09:13:17 2017 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/mapred/ShuffleHandler.java  | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a20debd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 863da7e..b7f2c6d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -992,13 +992,6 @@ public class ShuffleHandler extends AuxiliaryService {
         return;
       }
 
-      // this audit log is disabled by default,
-      // to turn it on please enable this audit log
-      // on log4j.properties by uncommenting the setting
-      if (AUDITLOG.isDebugEnabled()) {
-        AUDITLOG.debug("shuffle for " + jobQ.get(0) + " mappers: " + mapIds +
-                         " reducer " + reduceQ.get(0));
-      }
       int reduceId;
       String jobId;
       try {
@@ -1183,6 +1176,17 @@ public class ShuffleHandler extends AuxiliaryService {
 
       // Now set the response headers.
       setResponseHeaders(response, keepAliveParam, contentLength);
+
+      // this audit log is disabled by default,
+      // to turn it on please enable this audit log
+      // on log4j.properties by uncommenting the setting
+      if (AUDITLOG.isDebugEnabled()) {
+        StringBuilder sb = new StringBuilder("shuffle for ");
+        sb.append(jobId).append(" reducer ").append(reduce);
+        sb.append(" length ").append(contentLength);
+        sb.append(" mappers: ").append(mapIds);
+        AUDITLOG.debug(sb.toString());
+      }
     }
 
     protected void setResponseHeaders(HttpResponse response,
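
(Illustration only, with hypothetical names; the real handler wires up its own audit logger.) The audit line is built after setResponseHeaders because the content length is only known at that point, and the isDebugEnabled guard keeps the StringBuilder work off the shuffle path while the audit log stays disabled. A minimal sketch, using slf4j purely for the example:

import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ShuffleAuditSketch {
  private static final Logger AUDITLOG =
      LoggerFactory.getLogger(ShuffleAuditSketch.class.getName() + ".audit");

  static void logShuffle(String jobId, int reduce, long contentLength, List<String> mapIds) {
    // Guard avoids building the message when the audit log is disabled (the default).
    if (AUDITLOG.isDebugEnabled()) {
      StringBuilder sb = new StringBuilder("shuffle for ");
      sb.append(jobId).append(" reducer ").append(reduce);
      sb.append(" length ").append(contentLength);
      sb.append(" mappers: ").append(mapIds);
      AUDITLOG.debug(sb.toString());
    }
  }
}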


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[37/50] hadoop git commit: HDFS-11035. Better documentation for maintenance mode and upgrade domain.

Posted by as...@apache.org.
HDFS-11035. Better documentation for maintenance mode and upgrade domain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce943eb1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce943eb1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce943eb1

Branch: refs/heads/YARN-6592
Commit: ce943eb17a4218d8ac1f5293c6726122371d8442
Parents: 230b85d
Author: Ming Ma <mi...@twitter.com>
Authored: Wed Sep 20 09:36:33 2017 -0700
Committer: Ming Ma <mi...@twitter.com>
Committed: Wed Sep 20 09:36:33 2017 -0700

----------------------------------------------------------------------
 .../src/site/markdown/HdfsDataNodeAdminGuide.md | 165 ++++++++++++++++++
 .../src/site/markdown/HdfsUpgradeDomain.md      | 167 +++++++++++++++++++
 hadoop-project/src/site/site.xml                |   4 +-
 3 files changed, 335 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce943eb1/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
new file mode 100644
index 0000000..d6f288e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
@@ -0,0 +1,165 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+HDFS DataNode Admin Guide
+=================
+
+<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
+
+Overview
+--------
+
+The Hadoop Distributed File System (HDFS) namenode maintains states of all datanodes.
+There are two types of states. The first type describes the liveness of a datanode, indicating whether
+the node is live, dead or stale. The second type describes the admin state, indicating whether the node
+is in service, decommissioned or under maintenance.
+
+When an administrator decommissions a datanode, the datanode will first be transitioned into
+`DECOMMISSION_INPROGRESS` state. After all blocks belonging to that datanode have been fully replicated elsewhere
+based on each block's replication factor, the datanode will be transitioned to `DECOMMISSIONED` state. After that,
+the administrator can shut down the node to perform long-term repair and maintenance that could take days or weeks.
+After the machine has been repaired, it can be recommissioned back to the cluster.
+
+Sometimes administrators only need to take datanodes down for minutes/hours to perform short-term repair/maintenance.
+In such scenario, the HDFS block replication overhead incurred by decommission might not be necessary and a light-weight process is desirable.
+And that is what maintenance state is used for. When an administrator puts a datanode in maintenance state, the datanode will first be transitioned
+to `ENTERING_MAINTENANCE` state. As long as all blocks belonging to that datanode are minimally replicated elsewhere, the datanode
+will immediately be transitioned to `IN_MAINTENANCE` state. After the maintenance has completed, the administrator can take the datanode
+out of the maintenance state. In addition, maintenance state supports a timeout that allows administrators to configure the maximum duration in
+which a datanode is allowed to stay in maintenance state. After the timeout, the datanode will be transitioned out of maintenance state
+automatically by HDFS without human intervention.
+
+In summary, datanode admin operations include the following:
+
+* Decommission
+* Recommission
+* Putting nodes in maintenance state
+* Taking nodes out of maintenance state
+
+And datanode admin states include the following:
+
+* `NORMAL` The node is in service.
+* `DECOMMISSIONED` The node has been decommissioned.
+* `DECOMMISSION_INPROGRESS` The node is being transitioned to DECOMMISSIONED state.
+* `IN_MAINTENANCE` The node is in maintenance state.
+* `ENTERING_MAINTENANCE` The node is being transitioned to maintenance state.
+
+
+Host-level settings
+-----------
+
+To perform any of the datanode admin operations, there are two steps.
+
+* Update host-level configuration files to indicate the desired admin states of targeted datanodes. There are two supported formats for configuration files.
+    * Hostname-only configuration. Each line includes the hostname/ip address for a datanode. That is the default format.
+    * JSON-based configuration. The configuration is in JSON format. Each element maps to one datanode and each datanode can have multiple properties. This format is required to put datanodes into maintenance state.
+
+* Run the following command to have namenode reload the host-level configuration files.
+`hdfs dfsadmin [-refreshNodes]`
+
+### Hostname-only configuration
+This is the default configuration used by the namenode. It only supports node decommission and recommission; it doesn't support admin operations related to maintenance state. Use `dfs.hosts` and `dfs.hosts.exclude` as explained in [hdfs-default.xml](./hdfs-default.xml).
+
+In the following example, `host1` and `host2` need to be in service.
+`host3` and `host4` need to be in decommissioned state.
+
+dfs.hosts file
+```text
+host1
+host2
+host3
+host4
+```
+dfs.hosts.exclude file
+```text
+host3
+host4
+```
+
+### JSON-based configuration
+
+JSON-based format is the new configuration format that supports generic properties on datanodes. Set the following
+configurations to enable JSON-based format as explained in [hdfs-default.xml](./hdfs-default.xml).
+
+
+| Setting | Value |
+|:---- |:---- |
+|`dfs.namenode.hosts.provider.classname`| `org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager`|
+|`dfs.hosts`| the path of the json hosts file |
+
+Here is the list of properties currently supported by HDFS.
+
+
+| Property | Description |
+|:---- |:---- |
+|`hostName`| Required. The host name of the datanode. |
+|`upgradeDomain`| Optional. The upgrade domain id of the datanode. |
+|`adminState`| Optional. The expected admin state. The default value is `NORMAL`; `DECOMMISSIONED` for decommission; `IN_MAINTENANCE` for maintenance state. |
+|`port`| Optional. The port number of the datanode. |
+|`maintenanceExpireTimeInMS`| Optional. The epoch time in milliseconds until which the datanode will remain in maintenance state. The default value is forever. |
+
+In the following example, `host1` and `host2` need to be in service. `host3` needs to be in decommissioned state. `host4` needs to be in maintenance state.
+
+dfs.hosts file
+```json
+[
+  {
+    "hostName": "host1"
+  },
+  {
+    "hostName": "host2",
+    "upgradeDomain": "ud0"
+  },
+  {
+    "hostName": "host3",
+    "adminState": "DECOMMISSIONED"
+  },
+  {
+    "hostName": "host4",
+    "upgradeDomain": "ud2",
+    "adminState": "IN_MAINTENANCE"
+  }
+]
+```
+
+
+Cluster-level settings
+-----------
+
+There are several cluster-level settings related to datanode administration.
+For common use cases, you should rely on the default values. Please refer to
+[hdfs-default.xml](./hdfs-default.xml) for descriptions and default values.
+
+```text
+dfs.namenode.maintenance.replication.min
+dfs.namenode.decommission.interval
+dfs.namenode.decommission.blocks.per.interval
+dfs.namenode.decommission.max.concurrent.tracked.nodes
+```
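+
+If one of these does need to be overridden, the usual `hdfs-site.xml` mechanism applies; the value below is purely illustrative, not a recommendation.
+
+```xml
+<property>
+  <name>dfs.namenode.decommission.max.concurrent.tracked.nodes</name>
+  <!-- Illustrative value only; keep the shipped default unless you have a reason to change it. -->
+  <value>200</value>
+</property>
+```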
+
+Metrics
+-----------
+
+Admin states are part of the namenode's webUI and JMX. As explained in [HDFSCommands.html](./HDFSCommands.html), you can also verify admin states using the following commands.
+
+Use `dfsadmin` to check admin states at the cluster level.
+
+`hdfs dfsadmin -report`
+
+Use `fsck` to check admin states of datanodes storing data at a specific path. For backward compatibility, a special flag is required to return maintenance states.
+
+```text
+hdfs fsck <path> // only show decommission state
+hdfs fsck <path> -maintenance // include maintenance state
+```

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce943eb1/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUpgradeDomain.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUpgradeDomain.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUpgradeDomain.md
new file mode 100644
index 0000000..15a4bae
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUpgradeDomain.md
@@ -0,0 +1,167 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+HDFS Upgrade Domain
+====================
+
+<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
+
+
+Introduction
+------------
+
+The current default HDFS block placement policy guarantees that a block’s 3 replicas will be placed
+on at least 2 racks. Specifically, one replica is placed on one rack and the other two replicas
+are placed on another rack during the write pipeline. This is a good compromise between rack diversity and write-pipeline efficiency. Note that
+subsequent load balancing or machine membership changes might cause the 3 replicas of a block to be distributed
+across 3 different racks. Thus any 3 datanodes in different racks could store the 3 replicas of a block.
+
+
+However, the default placement policy impacts how we should perform datanode rolling upgrades.
+The [HDFS Rolling Upgrade document](./HdfsRollingUpgrade.html) explains how the datanodes can be upgraded in a rolling
+fashion without downtime. Because any 3 datanodes in different racks could store all the replicas of a block, it is
+important to perform a sequential restart of datanodes, one at a time, in order to minimize the impact on data availability
+and read/write operations. Upgrading one rack at a time is another option, but that will increase the chance of
+data unavailability if there is a machine failure on another rack during the upgrade.
+
+The side effect of this sequential datanode rolling upgrade strategy is longer
+upgrade duration for larger clusters.
+
+
+Architecture
+-------
+
+To address this limitation of the block placement policy for rolling upgrades, the concept of an upgrade domain
+has been added to HDFS via a new block placement policy. The idea is to group datanodes in a new
+dimension called upgrade domain, in addition to the existing rack-based grouping.
+For example, we can assign all datanodes in the first position of any rack to upgrade domain ud_01,
+nodes in the second position to upgrade domain ud_02, and so on.
+
+The namenode provides BlockPlacementPolicy interface to support any custom block placement besides
+the default block placement policy. A new upgrade domain block placement policy based on this interface
+is available in HDFS. It will make sure replicas of any given block are distributed across machines from different upgrade domains.
+By default, 3 replicas of any given block are placed on 3 different upgrade domains. This means all datanodes belonging to
+a specific upgrade domain collectively won't store more than one replica of any block.
+
+With the upgrade domain block placement policy in place, we can upgrade all datanodes belonging to one upgrade domain at the
+same time without impacting data availability. Only after finishing one upgrade domain do we move to the next,
+until all upgrade domains have been upgraded. Such a procedure ensures that no two replicas of any given
+block are upgraded at the same time. This means we can upgrade many machines at the same time on a large cluster.
+And as the cluster continues to scale, new machines will be added to the existing upgrade domains without impacting the
+parallelism of the upgrade.
+
+For an existing cluster with the default block placement policy, after switching to the new upgrade domain block
+placement policy, any newly created blocks will conform to the new policy. Blocks allocated under the old policy
+need to be migrated to conform to the new policy. There is a migrator tool you can use; see HDFS-8789 for details.
+
+
+Settings
+-------
+
+To enable upgrade domain on your clusters, please follow these steps:
+
+* Assign datanodes to individual upgrade domain groups.
+* Enable upgrade domain block placement policy.
+* Migrate blocks allocated based on old block placement policy to the new upgrade domain policy.
+
+### Upgrade domain id assignment
+
+How a datanode maps to an upgrade domain id is defined by administrators and specific to the cluster layout.
+A common way is to use the rack position of the machine as its upgrade domain id.
+
+To configure the mapping from a host name to its upgrade domain id, we need to use the JSON-based host configuration file
+by setting the following properties, as explained in [hdfs-default.xml](./hdfs-default.xml).
+
+| Setting | Value |
+|:---- |:---- |
+|`dfs.namenode.hosts.provider.classname` | `org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager`|
+|`dfs.hosts`| the path of the json hosts file |
+
+The JSON hosts file defines the properties for all hosts. In the following example,
+there are 4 datanodes in 2 racks; the machines at rack position 01 belong to upgrade domain 01;
+the machines at rack position 02 belong to upgrade domain 02.
+
+```json
+[
+  {
+    "hostName": "dcA-rackA-01",
+    "upgradeDomain": "01"
+  },
+  {
+    "hostName": "dcA-rackA-02",
+    "upgradeDomain": "02"
+  },
+  {
+    "hostName": "dcA-rackB-01",
+    "upgradeDomain": "01"
+  },
+  {
+    "hostName": "dcA-rackB-02",
+    "upgradeDomain": "02"
+  }
+]
+```
+
+
+### Enable upgrade domain block placement policy
+
+After each datanode has been assigned an upgrade domain id, the next step is to enable
+upgrade domain block placement policy with the following configuration as explained in [hdfs-default.xml](./hdfs-default.xml).
+
+| Setting | Value |
+|:---- |:---- |
+|`dfs.block.replicator.classname`| `org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithUpgradeDomain` |
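+
+In `hdfs-site.xml` this could look roughly like the following sketch.
+
+```xml
+<property>
+  <name>dfs.block.replicator.classname</name>
+  <value>org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithUpgradeDomain</value>
+</property>
+```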
+
+After restarting the namenode, the new policy will be used for any new block allocation.
+
+
+### Migration
+
+If you change the block placement policy of an existing cluster, you will need to make sure the
+blocks allocated prior to the block placement policy change conform to the new block placement policy.
+
+HDFS-8789 provides the initial draft patch of a client-side migration tool. After the tool is committed,
+we will be able to describe how to use the tool.
+
+
+Rolling restart based on upgrade domains
+-------
+
+During cluster administration, we might need to restart datanodes to pick up a new configuration, a new Hadoop release,
+a new JVM version, and so on. With upgrade domains enabled and all blocks on the cluster conforming to the new policy, we can now
+restart datanodes in batches, one upgrade domain at a time. Whether done manually or via automation, the steps are as follows (a shell sketch follows the list):
+
+* Group datanodes by upgrade domains based on dfsadmin or JMX's datanode information.
+* For each upgrade domain
+    * (Optional) Put all the nodes in that upgrade domain into maintenance state (refer to [HdfsDataNodeAdminGuide.html](./HdfsDataNodeAdminGuide.html)).
+    * Restart all those nodes.
+    * Check if all datanodes are healthy after restart. Unhealthy nodes should be decommissioned.
+    * (Optional) Take all those nodes out of maintenance state.
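+
+A minimal shell sketch of one iteration is shown below. The host-list file, the ssh access, and the datanode service name are all assumptions made for this illustration; the grouping itself is expected to come from `hdfs dfsadmin -report` or namenode JMX.
+
+```bash
+# Restart every datanode in one upgrade domain (hosts-in-ud01.txt is an assumed inventory file).
+for host in $(cat hosts-in-ud01.txt); do
+  ssh "$host" "sudo systemctl restart hadoop-hdfs-datanode"  # assumed service name
+done
+
+# Verify the datanodes in this upgrade domain are healthy before moving to the next one.
+hdfs dfsadmin -report
+```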
+
+
+Metrics
+-----------
+
+Upgrade domains are part of the namenode's JMX. As explained in [HDFSCommands.html](./HDFSCommands.html), you can also verify upgrade domains using the following commands.
+
+Use `dfsadmin` to check upgrade domains at the cluster level.
+
+`hdfs dfsadmin -report`
+
+Use `fsck` to check upgrade domains of datanodes storing data at a specific path.
+
+`hdfs fsck <path> -files -blocks -upgradedomains`

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce943eb1/hadoop-project/src/site/site.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 4685e2a..a88f0e3 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -101,7 +101,9 @@
       <item name="Synthetic Load Generator" href="hadoop-project-dist/hadoop-hdfs/SLGUserGuide.html"/>
       <item name="Erasure Coding" href="hadoop-project-dist/hadoop-hdfs/HDFSErasureCoding.html"/>
       <item name="Disk Balancer" href="hadoop-project-dist/hadoop-hdfs/HDFSDiskbalancer.html"/>
-   </menu>
+      <item name="Upgrade Domain" href="hadoop-project-dist/hadoop-hdfs/HdfsUpgradeDomain.html"/>
+      <item name="DataNode Admin" href="hadoop-project-dist/hadoop-hdfs/HdfsDataNodeAdminGuide.html"/>
+    </menu>
 
     <menu name="MapReduce" inherit="top">
       <item name="Tutorial" href="hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html"/>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[42/50] hadoop git commit: YARN-6968. Hardcoded absolute pathname in DockerLinuxContainerRuntime. Contributed by Eric Badger

Posted by as...@apache.org.
YARN-6968. Hardcoded absolute pathname in DockerLinuxContainerRuntime. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10d74935
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10d74935
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10d74935

Branch: refs/heads/YARN-6592
Commit: 10d7493587643b52cee5fde87eca9ef99c422a70
Parents: 50849ec
Author: Jason Lowe <jl...@apache.org>
Authored: Thu Sep 21 08:38:08 2017 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Thu Sep 21 08:38:08 2017 -0500

----------------------------------------------------------------------
 .../linux/resources/CGroupsHandler.java                |  6 ++++++
 .../linux/resources/CGroupsHandlerImpl.java            |  5 +++++
 .../linux/runtime/DockerLinuxContainerRuntime.java     | 13 ++++++++-----
 .../linux/runtime/TestDockerContainerRuntime.java      | 13 ++++++++++---
 4 files changed, 29 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10d74935/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
index 82bd366..5f4d3e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
@@ -173,4 +173,10 @@ public interface CGroupsHandler {
    */
   String getCGroupParam(CGroupController controller, String cGroupId,
       String param) throws ResourceHandlerException;
+
+  /**
+   * Returns CGroup Mount Path.
+   * @return parameter value as read from the parameter file
+   */
+  String getCGroupMountPath();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10d74935/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index f37dfd3..1c6385d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -603,4 +603,9 @@ class CGroupsHandlerImpl implements CGroupsHandler {
           "Unable to read from " + cGroupParamPath);
     }
   }
+
+  @Override
+  public String getCGroupMountPath() {
+    return cGroupMountPath;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10d74935/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 8217564..1ccd365 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -167,13 +167,12 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   public static final String ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS =
       "YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS";
 
-  static final String CGROUPS_ROOT_DIRECTORY = "/sys/fs/cgroup";
-
   private Configuration conf;
   private DockerClient dockerClient;
   private PrivilegedOperationExecutor privilegedOperationExecutor;
   private Set<String> allowedNetworks = new HashSet<>();
   private String defaultNetwork;
+  private String cgroupsRootDirectory;
   private CGroupsHandler cGroupsHandler;
   private AccessControlList privilegedContainersAcl;
 
@@ -228,6 +227,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       LOG.info("cGroupsHandler is null - cgroups not in use.");
     } else {
       this.cGroupsHandler = cGroupsHandler;
+      this.cgroupsRootDirectory = cGroupsHandler.getCGroupMountPath();
     }
   }
 
@@ -486,9 +486,12 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
         .setContainerWorkDir(containerWorkDir.toString())
         .setNetworkType(network);
     setHostname(runCommand, containerIdStr, hostname);
-    runCommand.setCapabilities(capabilities)
-        .addMountLocation(CGROUPS_ROOT_DIRECTORY,
-            CGROUPS_ROOT_DIRECTORY + ":ro", false);
+    runCommand.setCapabilities(capabilities);
+
+    if(cgroupsRootDirectory != null) {
+      runCommand.addMountLocation(cgroupsRootDirectory,
+          cgroupsRootDirectory + ":ro", false);
+    }
 
     List<String> allDirs = new ArrayList<>(containerLocalDirs);
     allDirs.addAll(filecacheDirs);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10d74935/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index d57d33c..3f4bb5c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileg
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerRunCommand;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeConstants;
@@ -261,12 +262,18 @@ public class TestDockerContainerRuntime {
   }
 
   private String getExpectedCGroupsMountString() {
+    CGroupsHandler cgroupsHandler = ResourceHandlerModule.getCGroupsHandler();
+    if(cgroupsHandler == null) {
+      return "";
+    }
+
+    String cgroupMountPath = cgroupsHandler.getCGroupMountPath();
     boolean cGroupsMountExists = new File(
-        DockerLinuxContainerRuntime.CGROUPS_ROOT_DIRECTORY).exists();
+        cgroupMountPath).exists();
 
     if(cGroupsMountExists) {
-      return "-v " + DockerLinuxContainerRuntime.CGROUPS_ROOT_DIRECTORY
-          + ":" + DockerLinuxContainerRuntime.CGROUPS_ROOT_DIRECTORY + ":ro ";
+      return "-v " + cgroupMountPath
+          + ":" + cgroupMountPath + ":ro ";
     } else {
       return "";
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[34/50] hadoop git commit: YARN-7186. Fix finicky TestContainerManager tests. Contributed by Arun Suresh.

Posted by as...@apache.org.
YARN-7186. Fix finicky TestContainerManager tests. Contributed by Arun Suresh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/647b7527
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/647b7527
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/647b7527

Branch: refs/heads/YARN-6592
Commit: 647b7527a9cdf4717e7dcbbb660e5812b67a17f1
Parents: 12d9d7b
Author: Junping Du <ju...@apache.org>
Authored: Tue Sep 19 18:31:15 2017 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Tue Sep 19 18:31:15 2017 -0700

----------------------------------------------------------------------
 .../containermanager/TestContainerManager.java  | 128 -------------------
 .../TestContainerSchedulerQueuing.java          |  70 ++++++++++
 2 files changed, 70 insertions(+), 128 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/647b7527/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
index 6eea77b..38df208 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -70,7 +70,6 @@ import org.apache.hadoop.yarn.api.records.ContainerRetryContext;
 import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
@@ -105,7 +104,6 @@ import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
@@ -142,14 +140,6 @@ public class TestContainerManager extends BaseContainerManagerTest {
     exec.setConf(conf);
     return spy(exec);
   }
-
-  @Override
-  @Before
-  public void setup() throws IOException {
-    conf.setInt(
-        YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH, 0);
-    super.setup();
-  }
   
   @Override
   protected ContainerManagerImpl
@@ -1945,122 +1935,4 @@ public class TestContainerManager extends BaseContainerManagerTest {
     Assert.assertTrue(response.getFailedRequests().get(cId).getMessage()
         .contains("Null resource visibility for local resource"));
   }
-
-  @Test
-  public void testContainerUpdateExecTypeOpportunisticToGuaranteed()
-      throws IOException, YarnException, InterruptedException {
-    delayContainers = true;
-    containerManager.start();
-    // Construct the Container-id
-    ContainerId cId = createContainerId(0);
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
-    StartContainerRequest scRequest =
-        StartContainerRequest.newInstance(
-            containerLaunchContext,
-            createContainerToken(cId, DUMMY_RM_IDENTIFIER,
-                context.getNodeId(), user, BuilderUtils.newResource(512, 1),
-                context.getContainerTokenSecretManager(), null,
-                ExecutionType.OPPORTUNISTIC));
-    List<StartContainerRequest> list = new ArrayList<>();
-    list.add(scRequest);
-    StartContainersRequest allRequests =
-        StartContainersRequest.newInstance(list);
-    containerManager.startContainers(allRequests);
-    // Make sure the container reaches RUNNING state
-    BaseContainerManagerTest.waitForNMContainerState(containerManager, cId,
-        org.apache.hadoop.yarn.server.nodemanager.
-            containermanager.container.ContainerState.RUNNING);
-    // Construct container resource increase request,
-    List<Token> updateTokens = new ArrayList<>();
-    Token containerToken =
-        createContainerToken(cId, 1, DUMMY_RM_IDENTIFIER, context.getNodeId(),
-            user, BuilderUtils.newResource(512, 1),
-            context.getContainerTokenSecretManager(), null,
-            ExecutionType.GUARANTEED);
-    updateTokens.add(containerToken);
-    ContainerUpdateRequest updateRequest =
-        ContainerUpdateRequest.newInstance(updateTokens);
-    ContainerUpdateResponse updateResponse =
-        containerManager.updateContainer(updateRequest);
-
-    Assert.assertEquals(
-        1, updateResponse.getSuccessfullyUpdatedContainers().size());
-    Assert.assertTrue(updateResponse.getFailedRequests().isEmpty());
-
-    //Make sure the container is running
-    List<ContainerId> statList = new ArrayList<ContainerId>();
-    statList.add(cId);
-    GetContainerStatusesRequest statRequest =
-        GetContainerStatusesRequest.newInstance(statList);
-    List<ContainerStatus> containerStatuses = containerManager
-        .getContainerStatuses(statRequest).getContainerStatuses();
-    Assert.assertEquals(1, containerStatuses.size());
-    for (ContainerStatus status : containerStatuses) {
-      Assert.assertEquals(
-          org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
-          status.getState());
-      Assert.assertEquals(ExecutionType.GUARANTEED, status.getExecutionType());
-    }
-  }
-
-  @Test
-  public void testContainerUpdateExecTypeGuaranteedToOpportunistic()
-      throws IOException, YarnException, InterruptedException {
-    delayContainers = true;
-    containerManager.start();
-    // Construct the Container-id
-    ContainerId cId = createContainerId(0);
-    ContainerLaunchContext containerLaunchContext =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
-    StartContainerRequest scRequest =
-        StartContainerRequest.newInstance(
-            containerLaunchContext,
-            createContainerToken(cId, DUMMY_RM_IDENTIFIER,
-                context.getNodeId(), user, BuilderUtils.newResource(512, 1),
-                context.getContainerTokenSecretManager(), null));
-    List<StartContainerRequest> list = new ArrayList<>();
-    list.add(scRequest);
-    StartContainersRequest allRequests =
-        StartContainersRequest.newInstance(list);
-    containerManager.startContainers(allRequests);
-    // Make sure the container reaches RUNNING state
-    BaseContainerManagerTest.waitForNMContainerState(containerManager, cId,
-        org.apache.hadoop.yarn.server.nodemanager.
-            containermanager.container.ContainerState.RUNNING);
-    // Construct container resource increase request,
-    List<Token> updateTokens = new ArrayList<>();
-    Token containerToken =
-        createContainerToken(cId, 1, DUMMY_RM_IDENTIFIER, context.getNodeId(),
-            user, BuilderUtils.newResource(512, 1),
-            context.getContainerTokenSecretManager(), null,
-            ExecutionType.OPPORTUNISTIC);
-    updateTokens.add(containerToken);
-    ContainerUpdateRequest updateRequest =
-        ContainerUpdateRequest.newInstance(updateTokens);
-    ContainerUpdateResponse updateResponse =
-        containerManager.updateContainer(updateRequest);
-
-    Assert.assertEquals(
-        1, updateResponse.getSuccessfullyUpdatedContainers().size());
-    Assert.assertTrue(updateResponse.getFailedRequests().isEmpty());
-
-    //Make sure the container is running
-    List<ContainerId> statList = new ArrayList<ContainerId>();
-    statList.add(cId);
-    GetContainerStatusesRequest statRequest =
-        GetContainerStatusesRequest.newInstance(statList);
-    List<ContainerStatus> containerStatuses = containerManager
-        .getContainerStatuses(statRequest).getContainerStatuses();
-    Assert.assertEquals(1, containerStatuses.size());
-    for (ContainerStatus status : containerStatuses) {
-      Assert.assertEquals(
-          org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
-          status.getState());
-      Assert
-          .assertEquals(ExecutionType.OPPORTUNISTIC, status.getExecutionType());
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/647b7527/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
index f3fc724..7c74049 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
@@ -1147,7 +1147,77 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
     waitForContainerState(containerManager, createContainerId(1),
         org.apache.hadoop.yarn.api.records.ContainerState.RUNNING);
 
+    containerStatuses = containerManager
+        .getContainerStatuses(statRequest).getContainerStatuses();
+    Assert.assertEquals(1, containerStatuses.size());
+    for (ContainerStatus status : containerStatuses) {
+      if (org.apache.hadoop.yarn.api.records.ContainerState.RUNNING ==
+          status.getState()) {
+        Assert.assertEquals(
+            ExecutionType.GUARANTEED, status.getExecutionType());
+      }
+    }
+
     // Ensure no containers are queued.
     Assert.assertEquals(0, containerScheduler.getNumQueuedContainers());
   }
+
+  @Test
+  public void testContainerUpdateExecTypeGuaranteedToOpportunistic()
+      throws IOException, YarnException, InterruptedException {
+    delayContainers = true;
+    containerManager.start();
+    // Construct the Container-id
+    ContainerId cId = createContainerId(0);
+    ContainerLaunchContext containerLaunchContext =
+        recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+    StartContainerRequest scRequest =
+        StartContainerRequest.newInstance(
+            containerLaunchContext,
+            createContainerToken(cId, DUMMY_RM_IDENTIFIER,
+                context.getNodeId(), user, BuilderUtils.newResource(512, 1),
+                context.getContainerTokenSecretManager(), null));
+    List<StartContainerRequest> list = new ArrayList<>();
+    list.add(scRequest);
+    StartContainersRequest allRequests =
+        StartContainersRequest.newInstance(list);
+    containerManager.startContainers(allRequests);
+    // Make sure the container reaches RUNNING state
+    BaseContainerManagerTest.waitForNMContainerState(containerManager, cId,
+        org.apache.hadoop.yarn.server.nodemanager.
+            containermanager.container.ContainerState.RUNNING);
+    // Construct container resource increase request,
+    List<Token> updateTokens = new ArrayList<>();
+    Token containerToken =
+        createContainerToken(cId, 1, DUMMY_RM_IDENTIFIER, context.getNodeId(),
+            user, BuilderUtils.newResource(512, 1),
+            context.getContainerTokenSecretManager(), null,
+            ExecutionType.OPPORTUNISTIC);
+    updateTokens.add(containerToken);
+    ContainerUpdateRequest updateRequest =
+        ContainerUpdateRequest.newInstance(updateTokens);
+    ContainerUpdateResponse updateResponse =
+        containerManager.updateContainer(updateRequest);
+
+    Assert.assertEquals(
+        1, updateResponse.getSuccessfullyUpdatedContainers().size());
+    Assert.assertTrue(updateResponse.getFailedRequests().isEmpty());
+
+    //Make sure the container is running
+    List<ContainerId> statList = new ArrayList<ContainerId>();
+    statList.add(cId);
+    GetContainerStatusesRequest statRequest =
+        GetContainerStatusesRequest.newInstance(statList);
+    List<ContainerStatus> containerStatuses = containerManager
+        .getContainerStatuses(statRequest).getContainerStatuses();
+    Assert.assertEquals(1, containerStatuses.size());
+    for (ContainerStatus status : containerStatuses) {
+      Assert.assertEquals(
+          org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
+          status.getState());
+      Assert
+          .assertEquals(ExecutionType.OPPORTUNISTIC, status.getExecutionType());
+    }
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[45/50] hadoop git commit: HDFS-12496. Make QuorumJournalManager timeout properties configurable. Contributed by Ajay Kumar.

Posted by as...@apache.org.
HDFS-12496. Make QuorumJournalManager timeout properties configurable. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9e423fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9e423fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9e423fa

Branch: refs/heads/YARN-6592
Commit: b9e423fa8d30ea89244f6ec018a8064cc87d94a9
Parents: b9db0af
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu Sep 21 08:44:43 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu Sep 21 08:44:43 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++
 .../qjournal/client/QuorumJournalManager.java   | 39 +++++++++-----------
 .../src/main/resources/hdfs-default.xml         | 11 ++++++
 3 files changed, 33 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9e423fa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 91f3bb9..b4842f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -725,6 +725,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.edit.log.transfer.bandwidthPerSec";
   public static final long DFS_EDIT_LOG_TRANSFER_RATE_DEFAULT = 0; //no throttling
 
+  public static final String DFS_QJM_OPERATIONS_TIMEOUT =
+      "dfs.qjm.operations.timeout";
+  public static final long DFS_QJM_OPERATIONS_TIMEOUT_DEFAULT = 60000;
+
   // Datanode File IO Stats
   public static final String DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY =
       "dfs.datanode.enable.fileio.fault.injection";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9e423fa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index 97c0050..f66e2c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -27,6 +27,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.PriorityQueue;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
@@ -76,18 +77,10 @@ public class QuorumJournalManager implements JournalManager {
   private final int newEpochTimeoutMs;
   private final int writeTxnsTimeoutMs;
 
-  // Since these don't occur during normal operation, we can
-  // use rather lengthy timeouts, and don't need to make them
-  // configurable.
-  private static final int FORMAT_TIMEOUT_MS            = 60000;
-  private static final int HASDATA_TIMEOUT_MS           = 60000;
-  private static final int CAN_ROLL_BACK_TIMEOUT_MS     = 60000;
-  private static final int FINALIZE_TIMEOUT_MS          = 60000;
-  private static final int PRE_UPGRADE_TIMEOUT_MS       = 60000;
-  private static final int ROLL_BACK_TIMEOUT_MS         = 60000;
-  private static final int DISCARD_SEGMENTS_TIMEOUT_MS  = 60000;
-  private static final int UPGRADE_TIMEOUT_MS           = 60000;
-  private static final int GET_JOURNAL_CTIME_TIMEOUT_MS = 60000;
+  // This timeout is used for calls that don't occur during normal operation
+  // e.g. format, upgrade operations and a few others. So we can use rather
+  // lengthy timeouts by default.
+  private final int timeoutMs;
   
   private final Configuration conf;
   private final URI uri;
@@ -141,6 +134,10 @@ public class QuorumJournalManager implements JournalManager {
     this.writeTxnsTimeoutMs = conf.getInt(
         DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_KEY,
         DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT);
+    this.timeoutMs = (int) conf.getTimeDuration(DFSConfigKeys
+            .DFS_QJM_OPERATIONS_TIMEOUT,
+        DFSConfigKeys.DFS_QJM_OPERATIONS_TIMEOUT_DEFAULT, TimeUnit
+            .MILLISECONDS);
   }
   
   protected List<AsyncLogger> createLoggers(
@@ -201,7 +198,7 @@ public class QuorumJournalManager implements JournalManager {
   public void format(NamespaceInfo nsInfo) throws IOException {
     QuorumCall<AsyncLogger,Void> call = loggers.format(nsInfo);
     try {
-      call.waitFor(loggers.size(), loggers.size(), 0, FORMAT_TIMEOUT_MS,
+      call.waitFor(loggers.size(), loggers.size(), 0, timeoutMs,
           "format");
     } catch (InterruptedException e) {
       throw new IOException("Interrupted waiting for format() response");
@@ -220,7 +217,7 @@ public class QuorumJournalManager implements JournalManager {
         loggers.isFormatted();
 
     try {
-      call.waitFor(loggers.size(), 0, 0, HASDATA_TIMEOUT_MS, "hasSomeData");
+      call.waitFor(loggers.size(), 0, 0, timeoutMs, "hasSomeData");
     } catch (InterruptedException e) {
       throw new IOException("Interrupted while determining if JNs have data");
     } catch (TimeoutException e) {
@@ -505,7 +502,7 @@ public class QuorumJournalManager implements JournalManager {
   public void doPreUpgrade() throws IOException {
     QuorumCall<AsyncLogger, Void> call = loggers.doPreUpgrade();
     try {
-      call.waitFor(loggers.size(), loggers.size(), 0, PRE_UPGRADE_TIMEOUT_MS,
+      call.waitFor(loggers.size(), loggers.size(), 0, timeoutMs,
           "doPreUpgrade");
       
       if (call.countExceptions() > 0) {
@@ -522,7 +519,7 @@ public class QuorumJournalManager implements JournalManager {
   public void doUpgrade(Storage storage) throws IOException {
     QuorumCall<AsyncLogger, Void> call = loggers.doUpgrade(storage);
     try {
-      call.waitFor(loggers.size(), loggers.size(), 0, UPGRADE_TIMEOUT_MS,
+      call.waitFor(loggers.size(), loggers.size(), 0, timeoutMs,
           "doUpgrade");
       
       if (call.countExceptions() > 0) {
@@ -539,7 +536,7 @@ public class QuorumJournalManager implements JournalManager {
   public void doFinalize() throws IOException {
     QuorumCall<AsyncLogger, Void> call = loggers.doFinalize();
     try {
-      call.waitFor(loggers.size(), loggers.size(), 0, FINALIZE_TIMEOUT_MS,
+      call.waitFor(loggers.size(), loggers.size(), 0, timeoutMs,
           "doFinalize");
       
       if (call.countExceptions() > 0) {
@@ -558,7 +555,7 @@ public class QuorumJournalManager implements JournalManager {
     QuorumCall<AsyncLogger, Boolean> call = loggers.canRollBack(storage,
         prevStorage, targetLayoutVersion);
     try {
-      call.waitFor(loggers.size(), loggers.size(), 0, CAN_ROLL_BACK_TIMEOUT_MS,
+      call.waitFor(loggers.size(), loggers.size(), 0, timeoutMs,
           "lockSharedStorage");
       
       if (call.countExceptions() > 0) {
@@ -591,7 +588,7 @@ public class QuorumJournalManager implements JournalManager {
   public void doRollback() throws IOException {
     QuorumCall<AsyncLogger, Void> call = loggers.doRollback();
     try {
-      call.waitFor(loggers.size(), loggers.size(), 0, ROLL_BACK_TIMEOUT_MS,
+      call.waitFor(loggers.size(), loggers.size(), 0, timeoutMs,
           "doRollback");
       
       if (call.countExceptions() > 0) {
@@ -609,7 +606,7 @@ public class QuorumJournalManager implements JournalManager {
     QuorumCall<AsyncLogger, Void> call = loggers.discardSegments(startTxId);
     try {
       call.waitFor(loggers.size(), loggers.size(), 0,
-          DISCARD_SEGMENTS_TIMEOUT_MS, "discardSegments");
+          timeoutMs, "discardSegments");
       if (call.countExceptions() > 0) {
         call.rethrowException(
             "Could not perform discardSegments of one or more JournalNodes");
@@ -628,7 +625,7 @@ public class QuorumJournalManager implements JournalManager {
     QuorumCall<AsyncLogger, Long> call = loggers.getJournalCTime();
     try {
       call.waitFor(loggers.size(), loggers.size(), 0,
-          GET_JOURNAL_CTIME_TIMEOUT_MS, "getJournalCTime");
+          timeoutMs, "getJournalCTime");
       
       if (call.countExceptions() > 0) {
         call.rethrowException("Could not journal CTime for one "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9e423fa/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 9327a2c..4e4ee33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4629,4 +4629,15 @@
     </description>
   </property>
 
+  <property>
+    <name>dfs.qjm.operations.timeout</name>
+    <value>60s</value>
+    <description>
+      Common key to set timeout for related operations in
+      QuorumJournalManager. This setting supports multiple time unit suffixes
+      as described in dfs.heartbeat.interval.
+      If no suffix is specified then milliseconds is assumed.
+    </description>
+  </property>
+
 </configuration>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[35/50] hadoop git commit: HADOOP-7308. Remove unused TaskLogAppender configurations from log4j.properties. Contributed by Todd Lipcon and J.Andreina.

Posted by as...@apache.org.
HADOOP-7308. Remove unused TaskLogAppender configurations from log4j.properties. Contributed by Todd Lipcon and J.Andreina.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e58b247
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e58b247
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e58b247

Branch: refs/heads/YARN-6592
Commit: 7e58b2478ce10f54b9b9a647f22a69dd528a81e6
Parents: a9019e1
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Sep 20 21:07:45 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Sep 20 21:07:49 2017 +0900

----------------------------------------------------------------------
 .../hadoop-common/src/main/conf/log4j.properties        | 12 ------------
 1 file changed, 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e58b247/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index bc1fa6c..5f4b22b 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -78,19 +78,7 @@ log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 #
 # TaskLog Appender
 #
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
 log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
 
 log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
 log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[15/50] hadoop git commit: YARN-7199. Fix TestAMRMClientContainerRequest.testOpportunisticAndGuaranteedRequests. (Botong Huang via asuresh)

Posted by as...@apache.org.
YARN-7199. Fix TestAMRMClientContainerRequest.testOpportunisticAndGuaranteedRequests. (Botong Huang via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29dd5515
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29dd5515
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29dd5515

Branch: refs/heads/YARN-6592
Commit: 29dd55153e37471d9c177f4bd173f1d02bc96410
Parents: 0adc047
Author: Arun Suresh <as...@apache.org>
Authored: Mon Sep 18 11:26:44 2017 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Mon Sep 18 11:26:44 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/yarn/client/api/AMRMClient.java  | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29dd5515/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
index 815915e..e86bd12 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
@@ -118,8 +118,8 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
     private String nodeLabelsExpression;
     private ExecutionTypeRequest executionTypeRequest =
         ExecutionTypeRequest.newInstance();
-    private String resourceProfile;
-    
+    private String resourceProfile = ProfileCapability.DEFAULT_PROFILE;
+
     /**
      * Instantiates a {@link ContainerRequest} with the given constraints and
      * locality relaxation enabled.
@@ -540,6 +540,11 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
         return this;
       }
 
+      public ContainerRequestBuilder resourceProfile(String resourceProfile) {
+        containerRequest.resourceProfile = resourceProfile;
+        return this;
+      }
+
       public ContainerRequest build() {
         containerRequest.sanityCheck();
         return containerRequest;


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[33/50] hadoop git commit: YARN-6499. Remove the doc about Schedulable#redistributeShare(). (Contributed by Chetna Chaudhari via Yufei Gu)

Posted by as...@apache.org.
YARN-6499. Remove the doc about Schedulable#redistributeShare(). (Contributed by Chetna Chaudhari via Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9019e1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9019e1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9019e1f

Branch: refs/heads/YARN-6592
Commit: a9019e1fb753f15c1927e3f9355996fd6544c14f
Parents: 647b752
Author: Yufei Gu <yu...@apache.org>
Authored: Tue Sep 19 18:27:37 2017 -0700
Committer: Yufei Gu <yu...@apache.org>
Committed: Tue Sep 19 18:28:31 2017 -0700

----------------------------------------------------------------------
 .../yarn/server/resourcemanager/scheduler/fair/Schedulable.java  | 4 ----
 1 file changed, 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9019e1f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
index 4d6af98..bd1ff7a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
@@ -41,10 +41,6 @@ import org.apache.hadoop.yarn.api.records.Resource;
  * - updateDemand() is called periodically to compute the demand of the various
  *   jobs and queues, which may be expensive (e.g. jobs must iterate through all
  *   their tasks to count failed tasks, tasks that can be speculated, etc).
- * - redistributeShare() is called after demands are updated and a Schedulable's
- *   fair share has been set by its parent to let it distribute its share among
- *   the other Schedulables within it (e.g. for queues that want to perform fair
- *   sharing among their jobs).
  */
 @Private
 @Unstable


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[05/50] hadoop git commit: YARN-6977. Node information is not provided for non am containers in RM logs. (Suma Shivaprasad via wangda)

Posted by as...@apache.org.
YARN-6977. Node information is not provided for non am containers in RM logs. (Suma Shivaprasad via wangda)

Change-Id: I0c44d09a560446dee2ba68c2b9ae69fce0ec1d3e
(cherry picked from commit 8a42e922fad613f3cf1cc6cb0f3fa72546a9cc56)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/958e8c0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/958e8c0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/958e8c0e

Branch: refs/heads/YARN-6592
Commit: 958e8c0e257216c82f68fee726e5280a919da94a
Parents: ef8cd5d
Author: Wangda Tan <wa...@apache.org>
Authored: Fri Sep 15 21:24:11 2017 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Fri Sep 15 21:29:31 2017 -0700

----------------------------------------------------------------------
 .../resourcemanager/scheduler/SchedulerNode.java       |  8 --------
 .../scheduler/common/fica/FiCaSchedulerNode.java       | 13 +++++++++++++
 .../scheduler/fair/FSSchedulerNode.java                | 10 ++++++++++
 3 files changed, 23 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/958e8c0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
index 272537c..90fa3e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
@@ -170,14 +170,6 @@ public abstract class SchedulerNode {
 
     launchedContainers.put(container.getId(),
         new ContainerInfo(rmContainer, launchedOnNode));
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Assigned container " + container.getId() + " of capacity "
-              + container.getResource() + " on host " + rmNode.getNodeAddress()
-              + ", which has " + numContainers + " containers, "
-              + getAllocatedResource() + " used and " + getUnallocatedResource()
-              + " available after allocation");
-    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/958e8c0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
index c26a11b..7277779 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
@@ -160,4 +160,17 @@ public class FiCaSchedulerNode extends SchedulerNode {
   public synchronized Map<ContainerId, RMContainer> getKillableContainers() {
     return Collections.unmodifiableMap(killableContainers);
   }
+
+  protected synchronized void allocateContainer(RMContainer rmContainer,
+      boolean launchedOnNode) {
+    super.allocateContainer(rmContainer, launchedOnNode);
+
+    final Container container = rmContainer.getContainer();
+    LOG.info("Assigned container " + container.getId() + " of capacity "
+          + container.getResource() + " on host " + getRMNode().getNodeAddress()
+          + ", which has " + getNumContainers() + " containers, "
+          + getAllocatedResource() + " used and " + getUnallocatedResource()
+          + " available after allocation");
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/958e8c0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java
index 93646f4..44ec9c3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
@@ -239,6 +240,15 @@ public class FSSchedulerNode extends SchedulerNode {
   protected synchronized void allocateContainer(RMContainer rmContainer,
                                                 boolean launchedOnNode) {
     super.allocateContainer(rmContainer, launchedOnNode);
+    if (LOG.isDebugEnabled()) {
+      final Container container = rmContainer.getContainer();
+      LOG.debug("Assigned container " + container.getId() + " of capacity "
+          + container.getResource() + " on host " + getRMNode().getNodeAddress()
+          + ", which has " + getNumContainers() + " containers, "
+          + getAllocatedResource() + " used and " + getUnallocatedResource()
+          + " available after allocation");
+    }
+
     Resource allocated = rmContainer.getAllocatedResource();
     if (!Resources.isNone(allocated)) {
       // check for satisfied preemption request and update bookkeeping
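
With this change the Capacity scheduler (FiCaSchedulerNode) logs every container assignment at INFO, while the Fair scheduler (FSSchedulerNode) keeps the message behind a DEBUG guard. Below is a minimal sketch of turning that DEBUG message on for troubleshooting, assuming the default log4j 1.x backend; the standalone class is illustrative only and not part of the patch.

// Hedged sketch: raise FSSchedulerNode's log level so the Fair scheduler also
// prints the "Assigned container ..." line shown in the diff above.
import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class EnableFsSchedulerNodeDebug {
  public static void main(String[] args) {
    Logger.getLogger(
        "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSSchedulerNode")
        .setLevel(Level.DEBUG);
    // Equivalent log4j.properties entry (assumed default RM logging setup):
    // log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSSchedulerNode=DEBUG
  }
}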




[30/50] hadoop git commit: HDFS-12444. Reduce runtime of TestWriteReadStripedFile. Contributed by Huafeng Wang and Andrew Wang.

Posted by as...@apache.org.
HDFS-12444. Reduce runtime of TestWriteReadStripedFile. Contributed by Huafeng Wang and Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59830ca7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59830ca7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59830ca7

Branch: refs/heads/YARN-6592
Commit: 59830ca772dfb5dcc8b3e5281ca482dea5a5fa3e
Parents: 7bbeacb
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Sep 19 13:44:42 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Sep 19 13:44:42 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/StripedFileTestUtil.java | 13 +++++++----
 .../hadoop/hdfs/TestWriteReadStripedFile.java   | 24 ++++++++++++--------
 .../hdfs/TestWriteStripedFileWithFailure.java   |  3 ++-
 3 files changed, 25 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59830ca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 1489e48..c771d21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -79,10 +79,15 @@ public class StripedFileTestUtil {
     assertEquals("File length should be the same", fileLength, status.getLen());
   }
 
-  static void verifyPread(FileSystem fs, Path srcPath,  int fileLength,
-      byte[] expected, byte[] buf) throws IOException {
-    final ErasureCodingPolicy ecPolicy =
-        ((DistributedFileSystem)fs).getErasureCodingPolicy(srcPath);
+  static void verifyPread(DistributedFileSystem fs, Path srcPath,
+      int fileLength, byte[] expected, byte[] buf) throws IOException {
+    final ErasureCodingPolicy ecPolicy = fs.getErasureCodingPolicy(srcPath);
+    verifyPread(fs, srcPath, fileLength, expected, buf, ecPolicy);
+  }
+
+  static void verifyPread(FileSystem fs, Path srcPath, int fileLength,
+      byte[] expected, byte[] buf, ErasureCodingPolicy ecPolicy)
+      throws IOException {
     try (FSDataInputStream in = fs.open(srcPath)) {
       int[] startOffsets = {0, 1, ecPolicy.getCellSize() - 102,
           ecPolicy.getCellSize(), ecPolicy.getCellSize() + 102,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59830ca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
index f27c978..805bcea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
@@ -47,12 +48,13 @@ import java.util.Random;
 public class TestWriteReadStripedFile {
   public static final Log LOG = LogFactory.getLog(TestWriteReadStripedFile.class);
   private final ErasureCodingPolicy ecPolicy =
-      StripedFileTestUtil.getDefaultECPolicy();
+      SystemErasureCodingPolicies.getByID(
+          SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
   private final int cellSize = ecPolicy.getCellSize();
   private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
   private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
   private final int numDNs = dataBlocks + parityBlocks;
-  private final int stripesPerBlock = 4;
+  private final int stripesPerBlock = 2;
   private final int blockSize = stripesPerBlock * cellSize;
   private final int blockGroupSize = blockSize * dataBlocks;
 
@@ -78,11 +80,10 @@ public class TestWriteReadStripedFile {
         false);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     fs = cluster.getFileSystem();
-    fs.enableErasureCodingPolicy(
-        StripedFileTestUtil.getDefaultECPolicy().getName());
+    fs.enableErasureCodingPolicy(ecPolicy.getName());
     fs.mkdirs(new Path("/ec"));
     cluster.getFileSystem().getClient().setErasureCodingPolicy("/ec",
-        StripedFileTestUtil.getDefaultECPolicy().getName());
+        ecPolicy.getName());
   }
 
   @After
@@ -225,7 +226,8 @@ public class TestWriteReadStripedFile {
 
     byte[] smallBuf = new byte[1024];
     byte[] largeBuf = new byte[fileLength + 100];
-    StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);
+    StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected,
+        largeBuf);
 
     StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
         largeBuf);
@@ -268,13 +270,15 @@ public class TestWriteReadStripedFile {
 
     byte[] smallBuf = new byte[1024];
     byte[] largeBuf = new byte[fileLength + 100];
-    // TODO: HDFS-8797
-    //StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);
+    StripedFileTestUtil
+        .verifyPread(fs, srcPath, fileLength, expected, largeBuf, ecPolicy);
 
-    StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected, largeBuf);
+    StripedFileTestUtil
+        .verifyStatefulRead(fs, srcPath, fileLength, expected, largeBuf);
     StripedFileTestUtil.verifySeek(fs, srcPath, fileLength, ecPolicy,
         blockGroupSize);
-    StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected, smallBuf);
+    StripedFileTestUtil
+        .verifyStatefulRead(fs, srcPath, fileLength, expected, smallBuf);
     // webhdfs doesn't support bytebuffer read
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59830ca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
index 03e9e10..c859b71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
@@ -160,7 +160,8 @@ public class TestWriteStripedFileWithFailure {
         blockSize * dataBlocks);
     StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
         smallBuf);
-    StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);
+    StripedFileTestUtil.verifyPread((DistributedFileSystem)fs, srcPath,
+        fileLength, expected, largeBuf);
 
     // delete the file
     fs.delete(srcPath, true);
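
The runtime win comes from two knobs visible above: switching to the narrower RS(3,2) policy, so the MiniDFSCluster needs only 5 DataNodes instead of the 9 required by the default RS(6,3), and halving stripesPerBlock. A rough sketch of how the cluster size falls out of the policy choice, using the SystemErasureCodingPolicies API from the diff (the printed numbers are an assumption based on the standard built-in policies):

// Hedged sketch: test-cluster sizing is derived from the EC policy, so a
// narrower policy directly shrinks the cluster and the data written per block.
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;

public class EcTestSizing {
  public static void main(String[] args) {
    ErasureCodingPolicy rs32 = SystemErasureCodingPolicies.getByID(
        SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
    int numDataNodes = rs32.getNumDataUnits() + rs32.getNumParityUnits(); // 3 + 2 = 5
    int stripesPerBlock = 2;                              // was 4 before this change
    int blockSize = stripesPerBlock * rs32.getCellSize();
    System.out.println(numDataNodes + " DataNodes, blockSize=" + blockSize);
  }
}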




[17/50] hadoop git commit: HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar via Haibo Chen)

Posted by as...@apache.org.
HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ee25278
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ee25278
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ee25278

Branch: refs/heads/YARN-6592
Commit: 1ee25278c891e95ba2ab142e5b78aebd752ea163
Parents: 7c73292
Author: Haibo Chen <ha...@apache.org>
Authored: Mon Sep 18 14:25:35 2017 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Mon Sep 18 14:25:35 2017 -0700

----------------------------------------------------------------------
 hadoop-client-modules/hadoop-client/pom.xml | 31 ++++++++++++++++++++++++
 1 file changed, 31 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ee25278/hadoop-client-modules/hadoop-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client/pom.xml b/hadoop-client-modules/hadoop-client/pom.xml
index bed3f5c..6500ebf 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -179,6 +179,37 @@
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-client</artifactId>
+      <scope>compile</scope>
+      <exclusions>
+        <!--Excluding hadoop-yarn-api & hadoop-annotations as they are already
+        included as direct dependencies. Guava,commons-cli and log4j are
+        transitive dependencies -->
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-annotations</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.google.guava</groupId>
+          <artifactId>guava</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-cli</groupId>
+          <artifactId>commons-cli</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-core</artifactId>
       <scope>compile</scope>
       <exclusions>
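
The practical effect of adding hadoop-yarn-client here is that applications depending only on the hadoop-client artifact can use the YARN client API directly. A minimal sketch of such a consumer follows (YarnClient is the public client API; the class below and its error handling are illustrative assumptions, not part of the patch):

// Hedged sketch: a downstream application that needs hadoop-yarn-client on the
// hadoop-client classpath in order to list running YARN applications.
import java.util.List;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ListYarnApps {
  public static void main(String[] args) throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    try {
      List<ApplicationReport> apps = client.getApplications();
      for (ApplicationReport app : apps) {
        System.out.println(app.getApplicationId() + " " + app.getName());
      }
    } finally {
      client.stop();
    }
  }
}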




[25/50] hadoop git commit: Revert "HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar via Haibo Chen)" HADOOP-14879 Build failure due to failing hadoop-client-check-invariants This reverts commit 1ee25278c891e95ba2ab142e5b78aebd

Posted by as...@apache.org.
Revert "HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar via Haibo Chen)"
HADOOP-14879 Build failure due to failing hadoop-client-check-invariants
This reverts commit 1ee25278c891e95ba2ab142e5b78aebd752ea163.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa6e8d2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa6e8d2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa6e8d2d

Branch: refs/heads/YARN-6592
Commit: aa6e8d2dff533c3d0c86776567c860548723c21c
Parents: dba7a7d
Author: Steve Loughran <st...@apache.org>
Authored: Tue Sep 19 11:53:11 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Tue Sep 19 11:53:11 2017 +0100

----------------------------------------------------------------------
 hadoop-client-modules/hadoop-client/pom.xml | 31 ------------------------
 1 file changed, 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa6e8d2d/hadoop-client-modules/hadoop-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client/pom.xml b/hadoop-client-modules/hadoop-client/pom.xml
index 6500ebf..bed3f5c 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -179,37 +179,6 @@
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-client</artifactId>
-      <scope>compile</scope>
-      <exclusions>
-        <!--Excluding hadoop-yarn-api & hadoop-annotations as they are already
-        included as direct dependencies. Guava,commons-cli and log4j are
-        transitive dependencies -->
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-yarn-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-annotations</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.google.guava</groupId>
-          <artifactId>guava</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>commons-cli</groupId>
-          <artifactId>commons-cli</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>log4j</groupId>
-          <artifactId>log4j</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-core</artifactId>
       <scope>compile</scope>
       <exclusions>




[02/50] hadoop git commit: HDFS-12323. NameNode terminates after full GC thinking QJM unresponsive if full GC is much longer than timeout. Contributed by Erik Krogen.

Posted by as...@apache.org.
HDFS-12323. NameNode terminates after full GC thinking QJM unresponsive if full GC is much longer than timeout. Contributed by Erik Krogen.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90894c72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90894c72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90894c72

Branch: refs/heads/YARN-6592
Commit: 90894c7262df0243e795b675f3ac9f7b322ccd11
Parents: b9b607d
Author: Erik Krogen <ek...@linkedin.com>
Authored: Thu Sep 14 15:53:33 2017 -0700
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Fri Sep 15 13:56:27 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/qjournal/client/QuorumCall.java | 65 ++++++++++++++++----
 .../hdfs/qjournal/client/TestQuorumCall.java    | 31 +++++++++-
 2 files changed, 82 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90894c72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
index dc32318..dee74e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
@@ -24,7 +24,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StopWatch;
-import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.Timer;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -35,6 +35,7 @@ import com.google.common.util.concurrent.ListenableFuture;
 import com.google.protobuf.Message;
 import com.google.protobuf.TextFormat;
 
+
 /**
  * Represents a set of calls for which a quorum of results is needed.
  * @param <KEY> a key used to identify each of the outgoing calls
@@ -60,11 +61,12 @@ class QuorumCall<KEY, RESULT> {
    * fraction of the configured timeout for any call.
    */
   private static final float WAIT_PROGRESS_WARN_THRESHOLD = 0.7f;
-  private final StopWatch quorumStopWatch = new StopWatch();
+  private final StopWatch quorumStopWatch;
+  private final Timer timer;
   
   static <KEY, RESULT> QuorumCall<KEY, RESULT> create(
-      Map<KEY, ? extends ListenableFuture<RESULT>> calls) {
-    final QuorumCall<KEY, RESULT> qr = new QuorumCall<KEY, RESULT>();
+      Map<KEY, ? extends ListenableFuture<RESULT>> calls, Timer timer) {
+    final QuorumCall<KEY, RESULT> qr = new QuorumCall<KEY, RESULT>(timer);
     for (final Entry<KEY, ? extends ListenableFuture<RESULT>> e : calls.entrySet()) {
       Preconditions.checkArgument(e.getValue() != null,
           "null future for key: " + e.getKey());
@@ -82,18 +84,53 @@ class QuorumCall<KEY, RESULT> {
     }
     return qr;
   }
-  
+
+  static <KEY, RESULT> QuorumCall<KEY, RESULT> create(
+      Map<KEY, ? extends ListenableFuture<RESULT>> calls) {
+    return create(calls, new Timer());
+  }
+
+  /**
+   * Not intended for outside use.
+   */
   private QuorumCall() {
+    this(new Timer());
+  }
+
+  private QuorumCall(Timer timer) {
     // Only instantiated from factory method above
+    this.timer = timer;
+    this.quorumStopWatch = new StopWatch(timer);
   }
 
+  /**
+   * Used in conjunction with {@link #getQuorumTimeoutIncreaseMillis(long, int)}
+   * to check for pauses.
+   */
   private void restartQuorumStopWatch() {
     quorumStopWatch.reset().start();
   }
 
-  private boolean shouldIncreaseQuorumTimeout(long offset, int millis) {
+  /**
+   * Check for a pause (e.g. GC) since the last time
+   * {@link #restartQuorumStopWatch()} was called. If detected, return the
+   * length of the pause; else, -1.
+   * @param offset Offset the elapsed time by this amount; use if some amount
+   *               of pause was expected
+   * @param millis Total length of timeout in milliseconds
+   * @return Length of pause, if detected, else -1
+   */
+  private long getQuorumTimeoutIncreaseMillis(long offset, int millis) {
     long elapsed = quorumStopWatch.now(TimeUnit.MILLISECONDS);
-    return elapsed + offset > (millis * WAIT_PROGRESS_INFO_THRESHOLD);
+    long pauseTime = elapsed + offset;
+    if (pauseTime > (millis * WAIT_PROGRESS_INFO_THRESHOLD)) {
+      QuorumJournalManager.LOG.info("Pause detected while waiting for " +
+          "QuorumCall response; increasing timeout threshold by pause time " +
+          "of " + pauseTime + " ms.");
+      return pauseTime;
+    } else {
+      return -1;
+    }
   }
 
   
@@ -119,7 +156,7 @@ class QuorumCall<KEY, RESULT> {
       int minResponses, int minSuccesses, int maxExceptions,
       int millis, String operationName)
       throws InterruptedException, TimeoutException {
-    long st = Time.monotonicNow();
+    long st = timer.monotonicNow();
     long nextLogTime = st + (long)(millis * WAIT_PROGRESS_INFO_THRESHOLD);
     long et = st + millis;
     while (true) {
@@ -128,7 +165,7 @@ class QuorumCall<KEY, RESULT> {
       if (minResponses > 0 && countResponses() >= minResponses) return;
       if (minSuccesses > 0 && countSuccesses() >= minSuccesses) return;
       if (maxExceptions >= 0 && countExceptions() > maxExceptions) return;
-      long now = Time.monotonicNow();
+      long now = timer.monotonicNow();
       
       if (now > nextLogTime) {
         long waited = now - st;
@@ -154,8 +191,9 @@ class QuorumCall<KEY, RESULT> {
       long rem = et - now;
       if (rem <= 0) {
         // Increase timeout if a full GC occurred after restarting stopWatch
-        if (shouldIncreaseQuorumTimeout(0, millis)) {
-          et = et + millis;
+        long timeoutIncrease = getQuorumTimeoutIncreaseMillis(0, millis);
+        if (timeoutIncrease > 0) {
+          et += timeoutIncrease;
         } else {
           throw new TimeoutException();
         }
@@ -165,8 +203,9 @@ class QuorumCall<KEY, RESULT> {
       rem = Math.max(rem, 1);
       wait(rem);
       // Increase timeout if a full GC occurred after restarting stopWatch
-      if (shouldIncreaseQuorumTimeout(-rem, millis)) {
-        et = et + millis;
+      long timeoutIncrease = getQuorumTimeoutIncreaseMillis(-rem, millis);
+      if (timeoutIncrease > 0) {
+        et += timeoutIncrease;
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90894c72/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java
index 506497e..97cf2f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java
@@ -23,7 +23,7 @@ import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.TimeoutException;
 
-import org.apache.hadoop.hdfs.qjournal.client.QuorumCall;
+import org.apache.hadoop.util.FakeTimer;
 import org.junit.Test;
 
 import com.google.common.base.Joiner;
@@ -83,4 +83,33 @@ public class TestQuorumCall {
     }
   }
 
+  @Test(timeout=10000)
+  public void testQuorumSucceedsWithLongPause() throws Exception {
+    final Map<String, SettableFuture<String>> futures = ImmutableMap.of(
+        "f1", SettableFuture.<String>create());
+
+    FakeTimer timer = new FakeTimer() {
+      private int callCount = 0;
+      @Override
+      public long monotonicNowNanos() {
+        callCount++;
+        if (callCount == 1) {
+          long old = super.monotonicNowNanos();
+          advance(1000000);
+          return old;
+        } else if (callCount == 10) {
+          futures.get("f1").set("first future");
+          return super.monotonicNowNanos();
+        } else {
+          return super.monotonicNowNanos();
+        }
+      }
+    };
+
+    QuorumCall<String, String> q = QuorumCall.create(futures, timer);
+    assertEquals(0, q.countResponses());
+
+    q.waitFor(1, 0, 0, 3000, "test"); // wait for 1 response
+  }
+
 }
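
The enabling refactor is dependency injection of the clock: QuorumCall now takes a Timer (and builds its StopWatch from it) instead of calling Time.monotonicNow() statically, which is what lets the new test simulate a long GC pause with a FakeTimer instead of sleeping. A condensed sketch of the pattern follows (Timer and FakeTimer are the Hadoop utilities used in the diff; the Waiter class is a made-up stand-in for QuorumCall, and the assumption that advance() takes milliseconds follows its use elsewhere in Hadoop tests):

// Hedged sketch of the injectable-clock pattern: production code gets a real
// Timer, tests pass a FakeTimer they can advance deterministically.
import org.apache.hadoop.util.FakeTimer;
import org.apache.hadoop.util.Timer;

class Waiter {                                    // stand-in for QuorumCall
  private final Timer timer;
  Waiter(Timer timer) { this.timer = timer; }
  boolean expired(long startMs, long timeoutMs) {
    return timer.monotonicNow() - startMs > timeoutMs;
  }
}

public class WaiterExample {
  public static void main(String[] args) {
    FakeTimer fake = new FakeTimer();
    Waiter waiter = new Waiter(fake);
    long start = fake.monotonicNow();
    fake.advance(5000);                           // "pause" for 5 s without sleeping
    System.out.println(waiter.expired(start, 3000));  // true
  }
}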




[49/50] hadoop git commit: YARN-4266. Allow users to enter containers as UID:GID pair instead of by username. Contributed by luhuichun, Zhankun Tang, and Eric Badger.

Posted by as...@apache.org.
YARN-4266. Allow users to enter containers as UID:GID pair instead of by username. Contributed by luhuichun, Zhankun Tang, and Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bfd1a72b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bfd1a72b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bfd1a72b

Branch: refs/heads/YARN-6592
Commit: bfd1a72ba8fbb06da73fede2a85e0b544d6ab43f
Parents: e5e1851
Author: Jason Lowe <jl...@apache.org>
Authored: Thu Sep 21 17:41:34 2017 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Thu Sep 21 17:41:34 2017 -0500

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     | 21 ++++++
 .../src/main/resources/yarn-default.xml         | 19 +++++
 .../runtime/DockerLinuxContainerRuntime.java    | 76 ++++++++++++++++++-
 .../linux/runtime/docker/DockerRunCommand.java  |  7 ++
 .../impl/container-executor.c                   |  4 +
 .../runtime/TestDockerContainerRuntime.java     | 79 +++++++++++++++++++-
 6 files changed, 204 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd1a72b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 114453f..f58833c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1587,6 +1587,27 @@ public class YarnConfiguration extends Configuration {
   public static final boolean DEFAULT_NM_DOCKER_ALLOW_PRIVILEGED_CONTAINERS =
       false;
 
+  /** enable user remapping. */
+  public static final String NM_DOCKER_ENABLE_USER_REMAPPING =
+      DOCKER_CONTAINER_RUNTIME_PREFIX + "enable-userremapping.allowed";
+
+  /** Set enable user remapping as false by default. */
+  public static final boolean DEFAULT_NM_DOCKER_ENABLE_USER_REMAPPING = false;
+
+  /** lower limit for acceptable uids of user remapped user. */
+  public static final String NM_DOCKER_USER_REMAPPING_UID_THRESHOLD =
+      DOCKER_CONTAINER_RUNTIME_PREFIX + "userremapping-uid-threshold";
+
+  /** Set user remapping lower uid limit to 1 by default. */
+  public static final int DEFAULT_NM_DOCKER_USER_REMAPPING_UID_THRESHOLD = 1;
+
+  /** lower limit for acceptable gids of user remapped user. */
+  public static final String NM_DOCKER_USER_REMAPPING_GID_THRESHOLD =
+      DOCKER_CONTAINER_RUNTIME_PREFIX + "userremapping-gid-threshold";
+
+  /** Set user remapping lower gid limit to 1 by default. */
+  public static final int DEFAULT_NM_DOCKER_USER_REMAPPING_GID_THRESHOLD = 1;
+
   /** ACL list for users allowed to run privileged containers. */
   public static final String NM_DOCKER_PRIVILEGED_CONTAINERS_ACL =
       DOCKER_CONTAINER_RUNTIME_PREFIX + "privileged-containers.acl";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd1a72b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 0440458..8453dc7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1643,6 +1643,25 @@
   </property>
 
   <property>
+    <description>Property to enable docker user remapping</description>
+    <name>yarn.nodemanager.runtime.linux.docker.enable-userremapping.allowed</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>lower limit for acceptable uids of user remapped user</description>
+    <name>yarn.nodemanager.runtime.linux.docker.userremapping-uid-threshold</name>
+    <value>1</value>
+  </property>
+
+
+  <property>
+    <description>lower limit for acceptable gids of user remapped user</description>
+    <name>yarn.nodemanager.runtime.linux.docker.userremapping-gid-threshold</name>
+    <value>1</value>
+  </property>
+
+  <property>
     <description>The mode in which the Java Container Sandbox should run detailed by
       the JavaSandboxLinuxContainerRuntime.</description>
     <name>yarn.nodemanager.runtime.linux.sandbox-mode</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd1a72b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 6491a99..b8d9b0a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
@@ -164,6 +165,9 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   public static final String ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER =
       "YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER";
   @InterfaceAudience.Private
+  public static final String ENV_DOCKER_CONTAINER_RUN_ENABLE_USER_REMAPPING =
+      "YARN_CONTAINER_RUNTIME_DOCKER_RUN_ENABLE_USER_REMAPPING";
+  @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS =
       "YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS";
 
@@ -175,6 +179,9 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   private String cgroupsRootDirectory;
   private CGroupsHandler cGroupsHandler;
   private AccessControlList privilegedContainersAcl;
+  private boolean enableUserReMapping;
+  private int userRemappingUidThreshold;
+  private int userRemappingGidThreshold;
 
   /**
    * Return whether the given environment variables indicate that the operation
@@ -260,6 +267,18 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     privilegedContainersAcl = new AccessControlList(conf.getTrimmed(
         YarnConfiguration.NM_DOCKER_PRIVILEGED_CONTAINERS_ACL,
         YarnConfiguration.DEFAULT_NM_DOCKER_PRIVILEGED_CONTAINERS_ACL));
+
+    enableUserReMapping = conf.getBoolean(
+      YarnConfiguration.NM_DOCKER_ENABLE_USER_REMAPPING,
+      YarnConfiguration.DEFAULT_NM_DOCKER_ENABLE_USER_REMAPPING);
+
+    userRemappingUidThreshold = conf.getInt(
+      YarnConfiguration.NM_DOCKER_USER_REMAPPING_UID_THRESHOLD,
+      YarnConfiguration.DEFAULT_NM_DOCKER_USER_REMAPPING_UID_THRESHOLD);
+
+    userRemappingGidThreshold = conf.getInt(
+      YarnConfiguration.NM_DOCKER_USER_REMAPPING_GID_THRESHOLD,
+      YarnConfiguration.DEFAULT_NM_DOCKER_USER_REMAPPING_GID_THRESHOLD);
   }
 
   @Override
@@ -436,6 +455,34 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
         "resource: " + mount);
   }
 
+  private String getUserIdInfo(String userName)
+      throws ContainerExecutionException {
+    String id = "";
+    Shell.ShellCommandExecutor shexec = new Shell.ShellCommandExecutor(
+        new String[]{"id", "-u", userName});
+    try {
+      shexec.execute();
+      id = shexec.getOutput().replaceAll("[^0-9]", "");
+    } catch (Exception e) {
+      throw new ContainerExecutionException(e);
+    }
+    return id;
+  }
+
+  private String[] getGroupIdInfo(String userName)
+      throws ContainerExecutionException {
+    String[] id = null;
+    Shell.ShellCommandExecutor shexec = new Shell.ShellCommandExecutor(
+        new String[]{"id", "-G", userName});
+    try {
+      shexec.execute();
+      id = shexec.getOutput().replace("\n", "").split(" ");
+    } catch (Exception e) {
+      throw new ContainerExecutionException(e);
+    }
+    return id;
+  }
+
   @Override
   public void launchContainer(ContainerRuntimeContext ctx)
       throws ContainerExecutionException {
@@ -458,7 +505,30 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 
     String containerIdStr = container.getContainerId().toString();
     String runAsUser = ctx.getExecutionAttribute(RUN_AS_USER);
+    String dockerRunAsUser = runAsUser;
     Path containerWorkDir = ctx.getExecutionAttribute(CONTAINER_WORK_DIR);
+    String[] groups = null;
+
+    if (enableUserReMapping) {
+      String uid = getUserIdInfo(runAsUser);
+      groups = getGroupIdInfo(runAsUser);
+      String gid = groups[0];
+      if(Integer.parseInt(uid) < userRemappingUidThreshold) {
+        String message = "uid: " + uid + " below threshold: "
+            + userRemappingUidThreshold;
+        throw new ContainerExecutionException(message);
+      }
+      for(int i = 0; i < groups.length; i++) {
+        String group = groups[i];
+        if (Integer.parseInt(group) < userRemappingGidThreshold) {
+          String message = "gid: " + group
+              + " below threshold: " + userRemappingGidThreshold;
+          throw new ContainerExecutionException(message);
+        }
+      }
+      dockerRunAsUser = uid + ":" + gid;
+    }
+
     //List<String> -> stored as List -> fetched/converted to List<String>
     //we can't do better here thanks to type-erasure
     @SuppressWarnings("unchecked")
@@ -481,7 +551,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 
     @SuppressWarnings("unchecked")
     DockerRunCommand runCommand = new DockerRunCommand(containerIdStr,
-        runAsUser, imageName)
+        dockerRunAsUser, imageName)
         .detachOnRun()
         .setContainerWorkDir(containerWorkDir.toString())
         .setNetworkType(network);
@@ -542,6 +612,10 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       runCommand.setOverrideCommandWithArgs(overrideCommands);
     }
 
+    if(enableUserReMapping) {
+      runCommand.groupAdd(groups);
+    }
+
     String commandFile = dockerClient.writeCommandToTempFile(runCommand,
         containerIdStr);
     PrivilegedOperation launchOp = buildLaunchOp(ctx,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd1a72b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
index b645754..1e1e6e8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
@@ -114,6 +114,13 @@ public class DockerRunCommand extends DockerCommand {
     return this;
   }
 
+  public DockerRunCommand groupAdd(String[] groups) {
+    for(int i = 0; i < groups.length; i++) {
+      super.addCommandArguments("--group-add " + groups[i]);
+    }
+    return this;
+  }
+
   public DockerRunCommand setOverrideCommandWithArgs(
       List<String> overrideCommandWithArgs) {
     this.overrrideCommandWithArgs = overrideCommandWithArgs;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd1a72b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 560ec18..e8bf564 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1258,6 +1258,7 @@ char* sanitize_docker_command(const char *line) {
     {"device", required_argument, 0, 'i' },
     {"detach", required_argument, 0, 't' },
     {"format", required_argument, 0, 'f' },
+    {"group-add", required_argument, 0, 'x' },
     {0, 0, 0, 0}
   };
 
@@ -1357,6 +1358,9 @@ char* sanitize_docker_command(const char *line) {
         strcat(output, optarg);
         strcat(output, " ");
         break;
+      case 'x':
+        quote_and_append_arg(&output, &output_size, "--group-add ", optarg);
+        break;
       default:
         fprintf(LOGFILE, "Unknown option in docker command, character %d %c, optionindex = %d\n", c, c, optind);
         fflush(LOGFILE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd1a72b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index 0e314bf..e548790 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -77,6 +78,7 @@ public class TestDockerContainerRuntime {
   private ContainerLaunchContext context;
   private HashMap<String, String> env;
   private String image;
+  private String uidGidPair;
   private String runAsUser;
   private String user;
   private String appId;
@@ -126,6 +128,7 @@ public class TestDockerContainerRuntime {
     when(context.getEnvironment()).thenReturn(env);
     when(container.getUser()).thenReturn(submittingUser);
 
+    uidGidPair = "";
     runAsUser = "run_as_user";
     user = "user";
     appId = "app_id";
@@ -230,7 +233,6 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals(13, args.size());
 
     //verify arguments
-    Assert.assertEquals(runAsUser, args.get(0));
     Assert.assertEquals(user, args.get(1));
     Assert.assertEquals(Integer.toString(PrivilegedOperation.RunAsUserCommand
         .LAUNCH_DOCKER_CONTAINER.getValue()), args.get(2));
@@ -322,6 +324,81 @@ public class TestDockerContainerRuntime {
   }
 
   @Test
+  public void testContainerLaunchWithUserRemapping()
+      throws ContainerExecutionException, PrivilegedOperationException,
+      IOException {
+    conf.setBoolean(YarnConfiguration.NM_DOCKER_ENABLE_USER_REMAPPING,
+        true);
+    Shell.ShellCommandExecutor shexec = new Shell.ShellCommandExecutor(
+        new String[]{"whoami"});
+    shexec.execute();
+    // get rid of newline at the end
+    runAsUser = shexec.getOutput().replaceAll("\n$", "");
+    builder.setExecutionAttribute(RUN_AS_USER, runAsUser);
+
+    DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
+        mockExecutor, mockCGroupsHandler);
+    runtime.initialize(conf);
+    runtime.launchContainer(builder.build());
+
+    PrivilegedOperation op = capturePrivilegedOperationAndVerifyArgs();
+    List<String> args = op.getArguments();
+    String dockerCommandFile = args.get(11);
+
+    String uid = "";
+    String gid = "";
+    Shell.ShellCommandExecutor shexec1 = new Shell.ShellCommandExecutor(
+        new String[]{"id", "-u", runAsUser});
+    Shell.ShellCommandExecutor shexec2 = new Shell.ShellCommandExecutor(
+        new String[]{"id", "-g", runAsUser});
+    try {
+      shexec1.execute();
+      // get rid of newline at the end
+      uid = shexec1.getOutput().replaceAll("\n$", "");
+    } catch (Exception e) {
+      LOG.info("Could not run id -u command: " + e);
+    }
+    try {
+      shexec2.execute();
+      // get rid of newline at the end
+      gid = shexec2.getOutput().replaceAll("\n$", "");
+    } catch (Exception e) {
+      LOG.info("Could not run id -g command: " + e);
+    }
+    uidGidPair = uid + ":" + gid;
+
+    //This is the expected docker invocation for this case
+    StringBuffer expectedCommandTemplate = new StringBuffer("run --name=%1$s ")
+        .append("--user=%2$s -d ")
+        .append("--workdir=%3$s ")
+        .append("--net=host ")
+        .append("--hostname=" + defaultHostname + " ")
+        .append(getExpectedTestCapabilitiesArgumentString())
+        .append(getExpectedCGroupsMountString())
+        .append("-v %4$s:%4$s ")
+        .append("-v %5$s:%5$s ")
+        .append("-v %6$s:%6$s ")
+        .append("-v %7$s:%7$s ")
+        .append("-v %8$s:%8$s ")
+        .append("(--group-add \\d+ )*")
+        .append("%9$s ")
+        .append("bash %10$s/launch_container.sh");
+
+    String expectedCommand = String
+        .format(expectedCommandTemplate.toString(), containerId, uidGidPair,
+            containerWorkDir, containerLocalDirs.get(0), filecacheDirs.get(0),
+            containerWorkDir, containerLogDirs.get(0), userLocalDirs.get(0),
+            image, containerWorkDir);
+
+    List<String> dockerCommands = Files.readAllLines(
+        Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
+
+    Assert.assertEquals(1, dockerCommands.size());
+    //Assert.assertEquals(expectedCommand, dockerCommands.get(0));
+    Assert.assertTrue(dockerCommands.get(0).matches(expectedCommand));
+  }
+
+  @Test
   public void testAllowedNetworksConfiguration() throws
       ContainerExecutionException {
     //the default network configuration should cause
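
Under the hood the runtime resolves the submitting user to a numeric uid:gid pair with id -u and id -G, validates both against the configured thresholds, and forwards the supplementary groups to Docker as --group-add arguments. A stripped-down sketch of that composition follows (the buildArgs helper and the sample id output are illustrative assumptions; the real logic lives in DockerLinuxContainerRuntime and DockerRunCommand above):

// Hedged sketch: turn `id` output into the --user / --group-add arguments the
// patch adds to the docker run command, enforcing the uid/gid thresholds.
public class UidGidArgs {
  static String buildArgs(String uidOut, String groupsOut, int uidMin, int gidMin) {
    String uid = uidOut.replaceAll("[^0-9]", "");
    String[] groups = groupsOut.replace("\n", "").split(" ");
    if (Integer.parseInt(uid) < uidMin) {
      throw new IllegalArgumentException("uid " + uid + " below threshold " + uidMin);
    }
    StringBuilder args = new StringBuilder("--user=" + uid + ":" + groups[0]);
    for (String group : groups) {
      if (Integer.parseInt(group) < gidMin) {
        throw new IllegalArgumentException("gid " + group + " below threshold " + gidMin);
      }
      args.append(" --group-add ").append(group);
    }
    return args.toString();
  }

  public static void main(String[] args) {
    // Assumed sample output of `id -u user` and `id -G user`:
    System.out.println(buildArgs("1000\n", "1000 4 27\n", 1, 1));
    // -> --user=1000:1000 --group-add 1000 --group-add 4 --group-add 27
  }
}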




[40/50] hadoop git commit: YARN-6771. Use classloader inside configuration class to make new classes. Contributed by Jongyoul Lee.

Posted by as...@apache.org.
YARN-6771. Use classloader inside configuration class to make new
classes. Contributed by Jongyoul Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b336632
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b336632
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b336632

Branch: refs/heads/YARN-6592
Commit: 8b336632acad10e45d029596c5e3196e1857d891
Parents: 53047f9
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Sep 20 22:22:43 2017 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Sep 20 22:22:43 2017 -0700

----------------------------------------------------------------------
 .../impl/pb/RpcClientFactoryPBImpl.java         |  3 +-
 .../impl/pb/RpcServerFactoryPBImpl.java         |  5 +-
 .../impl/pb/TestRpcClientFactoryPBImpl.java     | 49 ++++++++++++++++++++
 .../impl/pb/TestRpcServerFactoryPBImpl.java     | 48 +++++++++++++++++++
 4 files changed, 100 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b336632/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
index 062fa66..07c5e23 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
@@ -45,7 +45,6 @@ public class RpcClientFactoryPBImpl implements RpcClientFactory {
   private static final String PB_IMPL_CLASS_SUFFIX = "PBClientImpl";
   
   private static final RpcClientFactoryPBImpl self = new RpcClientFactoryPBImpl();
-  private Configuration localConf = new Configuration();
   private ConcurrentMap<Class<?>, Constructor<?>> cache = new ConcurrentHashMap<Class<?>, Constructor<?>>();
   
   public static RpcClientFactoryPBImpl get() {
@@ -62,7 +61,7 @@ public class RpcClientFactoryPBImpl implements RpcClientFactory {
     if (constructor == null) {
       Class<?> pbClazz = null;
       try {
-        pbClazz = localConf.getClassByName(getPBImplClassName(protocol));
+        pbClazz = conf.getClassByName(getPBImplClassName(protocol));
       } catch (ClassNotFoundException e) {
         throw new YarnRuntimeException("Failed to load class: ["
             + getPBImplClassName(protocol) + "]", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b336632/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
index 60e549a..ec9a5f2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
@@ -51,7 +51,6 @@ public class RpcServerFactoryPBImpl implements RpcServerFactory {
   
   private static final RpcServerFactoryPBImpl self = new RpcServerFactoryPBImpl();
 
-  private Configuration localConf = new Configuration();
   private ConcurrentMap<Class<?>, Constructor<?>> serviceCache = new ConcurrentHashMap<Class<?>, Constructor<?>>();
   private ConcurrentMap<Class<?>, Method> protoCache = new ConcurrentHashMap<Class<?>, Method>();
   
@@ -80,7 +79,7 @@ public class RpcServerFactoryPBImpl implements RpcServerFactory {
     if (constructor == null) {
       Class<?> pbServiceImplClazz = null;
       try {
-        pbServiceImplClazz = localConf
+        pbServiceImplClazz = conf
             .getClassByName(getPbServiceImplClassName(protocol));
       } catch (ClassNotFoundException e) {
         throw new YarnRuntimeException("Failed to load class: ["
@@ -113,7 +112,7 @@ public class RpcServerFactoryPBImpl implements RpcServerFactory {
     if (method == null) {
       Class<?> protoClazz = null;
       try {
-        protoClazz = localConf.getClassByName(getProtoClassName(protocol));
+        protoClazz = conf.getClassByName(getProtoClassName(protocol));
       } catch (ClassNotFoundException e) {
         throw new YarnRuntimeException("Failed to load class: ["
             + getProtoClassName(protocol) + "]", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b336632/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/factories/impl/pb/TestRpcClientFactoryPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/factories/impl/pb/TestRpcClientFactoryPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/factories/impl/pb/TestRpcClientFactoryPBImpl.java
new file mode 100644
index 0000000..41febd3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/factories/impl/pb/TestRpcClientFactoryPBImpl.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.factories.impl.pb;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import java.net.InetSocketAddress;
+
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Test class for RpcClientFactoryPBImpl.
+ */
+public class TestRpcClientFactoryPBImpl {
+  @Test
+  public void testToUseCustomClassloader() throws Exception {
+    Configuration configuration = mock(Configuration.class);
+    RpcClientFactoryPBImpl rpcClientFactoryPB = RpcClientFactoryPBImpl.get();
+    try {
+      rpcClientFactoryPB.getClient(
+          Class.forName("org.apache.hadoop.yarn.api.ApplicationClientProtocol"),
+          -1, new InetSocketAddress(0), configuration);
+    } catch (Exception e) {
+      // Do nothing
+    }
+    verify(configuration, atLeastOnce()).getClassByName(anyString());
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b336632/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/factories/impl/pb/TestRpcServerFactoryPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/factories/impl/pb/TestRpcServerFactoryPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/factories/impl/pb/TestRpcServerFactoryPBImpl.java
new file mode 100644
index 0000000..6a5ce84
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/factories/impl/pb/TestRpcServerFactoryPBImpl.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.factories.impl.pb;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import java.net.InetSocketAddress;
+
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Test class for RpcServerFactoryPBImpl.
+ */
+public class TestRpcServerFactoryPBImpl {
+  @Test
+  public void testToUseCustomClassloader() throws Exception {
+    Configuration configuration = mock(Configuration.class);
+    RpcServerFactoryPBImpl rpcServerFactoryPB = RpcServerFactoryPBImpl.get();
+    try {
+      rpcServerFactoryPB.getServer(
+          Class.forName("org.apache.hadoop.yarn.api.ApplicationClientProtocol"),
+          -1, new InetSocketAddress(0), configuration, null, 1);
+    } catch (Exception e) {
+      // Do nothing
+    }
+    verify(configuration, atLeastOnce()).getClassByName(anyString());
+  }
+}
\ No newline at end of file
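
Editor's note: the two new tests only assert that the PB factories resolve protocol classes through the caller-supplied Configuration (via getClassByName) rather than through a fresh internal one. A minimal caller-side sketch of what that enables, assuming hadoop-common is on the classpath; the loader and printed messages below are illustrative, not part of the commit:

import java.net.URL;
import java.net.URLClassLoader;
import org.apache.hadoop.conf.Configuration;

public class CustomLoaderSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Install an application-supplied classloader; because the factories now
    // resolve generated protocol classes through this Configuration, the
    // custom loader is consulted instead of being silently ignored.
    ClassLoader custom = new URLClassLoader(new URL[0],
        Thread.currentThread().getContextClassLoader());
    conf.setClassLoader(custom);
    try {
      Class<?> clazz = conf.getClassByName(
          "org.apache.hadoop.yarn.api.ApplicationClientProtocol");
      System.out.println("Resolved " + clazz.getName());
    } catch (ClassNotFoundException e) {
      System.out.println("Not visible to the custom loader: " + e.getMessage());
    }
  }
}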


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[18/50] hadoop git commit: MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe

Posted by as...@apache.org.
MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3d61304
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3d61304
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3d61304

Branch: refs/heads/YARN-6592
Commit: b3d61304f2fa4a99526f7a60ccaac9f262083079
Parents: 1ee2527
Author: Jason Lowe <jl...@apache.org>
Authored: Mon Sep 18 17:04:43 2017 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Mon Sep 18 17:04:43 2017 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/mapred/ShuffleHandler.java  | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3d61304/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 863da7e..06a3e42 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -992,13 +992,6 @@ public class ShuffleHandler extends AuxiliaryService {
         return;
       }
 
-      // this audit log is disabled by default,
-      // to turn it on please enable this audit log
-      // on log4j.properties by uncommenting the setting
-      if (AUDITLOG.isDebugEnabled()) {
-        AUDITLOG.debug("shuffle for " + jobQ.get(0) + " mappers: " + mapIds +
-                         " reducer " + reduceQ.get(0));
-      }
       int reduceId;
       String jobId;
       try {
@@ -1183,6 +1176,17 @@ public class ShuffleHandler extends AuxiliaryService {
 
       // Now set the response headers.
       setResponseHeaders(response, keepAliveParam, contentLength);
+
+      // this audit log is disabled by default,
+      // to turn it on please enable this audit log
+      // on log4j.properties by uncommenting the setting
+      if (AUDITLOG.isDebugEnabled()) {
+        StringBuilder sb = new StringBuilder("shuffle for ").append(jobId);
+        sb.append(" mappers: ").append(mapIds);
+        sb.append(" reducer ").append(reduce);
+        sb.append(" length ").append(contentLength);
+        AUDITLOG.debug(sb.toString());
+      }
     }
 
     protected void setResponseHeaders(HttpResponse response,
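
Editor's note: the audit line moves below setResponseHeaders() because the transfer size (contentLength) is only known once the response headers have been computed. A standalone sketch of the same guarded audit-logging pattern; SLF4J and all names here are illustrative, not the handler's actual logger:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ShuffleAuditSketch {
  // Hypothetical audit logger; the real AUDITLOG lives inside ShuffleHandler.
  private static final Logger AUDIT = LoggerFactory.getLogger("shuffle.audit");

  static void logShuffle(String jobId, String mapIds, int reduce,
      long contentLength) {
    // The isDebugEnabled() guard keeps the string building off the hot path
    // while the audit log stays at its default (disabled) level.
    if (AUDIT.isDebugEnabled()) {
      StringBuilder sb = new StringBuilder("shuffle for ").append(jobId);
      sb.append(" mappers: ").append(mapIds);
      sb.append(" reducer ").append(reduce);
      sb.append(" length ").append(contentLength);
      AUDIT.debug(sb.toString());
    }
  }
}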


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[10/50] hadoop git commit: HDFS-12460. Make addErasureCodingPolicy an idempotent operation. Contributed by Sammi Chen

Posted by as...@apache.org.
HDFS-12460. Make addErasureCodingPolicy an idempotent operation. Contributed by Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f9af246
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f9af246
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f9af246

Branch: refs/heads/YARN-6592
Commit: 0f9af246e89e4ad3c4d7ff2c1d7ec9b397494a03
Parents: e81596d
Author: Kai Zheng <ka...@intel.com>
Authored: Mon Sep 18 18:07:12 2017 +0800
Committer: Kai Zheng <ka...@intel.com>
Committed: Mon Sep 18 18:07:12 2017 +0800

----------------------------------------------------------------------
 .../hdfs/server/namenode/ErasureCodingPolicyManager.java      | 7 ++++---
 .../org/apache/hadoop/hdfs/TestErasureCodingPolicies.java     | 2 +-
 .../hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java   | 4 ++--
 .../hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java  | 4 ++--
 4 files changed, 9 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f9af246/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 3a46c30..90699b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -242,14 +242,15 @@ public final class ErasureCodingPolicyManager {
         policy.getSchema(), policy.getCellSize());
     for (ErasureCodingPolicy p : getPolicies()) {
       if (p.getName().equals(assignedNewName)) {
-        throw new HadoopIllegalArgumentException("The policy name " +
-            assignedNewName + " already exists");
+        LOG.info("The policy name " + assignedNewName + " already exists");
+        return p;
       }
       if (p.getSchema().equals(policy.getSchema()) &&
           p.getCellSize() == policy.getCellSize()) {
-        throw new HadoopIllegalArgumentException("A policy with same schema "
+        LOG.info("A policy with same schema "
             + policy.getSchema().toString() + " and cell size "
             + p.getCellSize() + " already exists");
+        return p;
       }
     }
     policy.setName(assignedNewName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f9af246/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 19277c4..4f2040b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -718,7 +718,7 @@ public class TestErasureCodingPolicies {
     policyArray  = new ErasureCodingPolicy[]{policy0};
     responses = fs.addErasureCodingPolicies(policyArray);
     assertEquals(1, responses.length);
-    assertFalse(responses[0].isSucceed());
+    assertTrue(responses[0].isSucceed());
 
     // Test add policy successfully
     newPolicy =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f9af246/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
index d217813..42ff698 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
@@ -436,7 +436,7 @@ public class TestNamenodeRetryCache {
 
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 26, cacheSet.size());
+    assertEquals("Retry cache size is wrong", 34, cacheSet.size());
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
@@ -455,7 +455,7 @@ public class TestNamenodeRetryCache {
     assertTrue(namesystem.hasRetryCache());
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
         .getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 26, cacheSet.size());
+    assertEquals("Retry cache size is wrong", 34, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f9af246/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
index b40006b..1d114d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
@@ -166,7 +166,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn0 = cluster.getNamesystem(0);
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 26, cacheSet.size());
+    assertEquals("Retry cache size is wrong", 34, cacheSet.size());
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
@@ -187,7 +187,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn1 = cluster.getNamesystem(1);
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
         .getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 26, cacheSet.size());
+    assertEquals("Retry cache size is wrong", 34, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();
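
Editor's note: returning the already-registered policy instead of throwing is what makes the RPC safely replayable from the retry cache (hence the cache-size and isSucceed() adjustments above). A minimal sketch of the idempotent-add shape, with hypothetical names rather than the ErasureCodingPolicyManager API:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class IdempotentAddSketch {
  private final Map<String, String> policiesByName = new ConcurrentHashMap<>();

  // A retried request that re-sends the same policy observes the same
  // successful result as the first attempt instead of an "already exists"
  // error.
  public String addPolicy(String name, String definition) {
    String existing = policiesByName.putIfAbsent(name, definition);
    return existing != null ? existing : definition;
  }
}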


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[44/50] hadoop git commit: MAPREDUCE-6964. BaileyBorweinPlouffe should use Time.monotonicNow for measuring durations. Contributed by Chetna Chaudhari

Posted by as...@apache.org.
MAPREDUCE-6964. BaileyBorweinPlouffe should use Time.monotonicNow for measuring durations. Contributed by Chetna Chaudhari


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9db0afa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9db0afa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9db0afa

Branch: refs/heads/YARN-6592
Commit: b9db0afa1e65d0343ad4e5760c36e042a3e704b0
Parents: 6bf921a
Author: Jason Lowe <jl...@apache.org>
Authored: Thu Sep 21 09:37:19 2017 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Thu Sep 21 09:37:19 2017 -0500

----------------------------------------------------------------------
 .../java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java   | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9db0afa/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
index 7e98d7d..b9987a5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.Reducer;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -379,7 +380,7 @@ public class BaileyBorweinPlouffe extends Configured implements Tool {
 
     // start a map/reduce job
     out.println("\nStarting Job ...");
-    final long startTime = System.currentTimeMillis();
+    final long startTime = Time.monotonicNow();
     try {
       if (!job.waitForCompletion(true)) {
         out.println("Job failed.");
@@ -388,7 +389,7 @@ public class BaileyBorweinPlouffe extends Configured implements Tool {
     } catch (Exception e) {
       throw new RuntimeException(e);
     } finally {
-      final double duration = (System.currentTimeMillis() - startTime)/1000.0;
+      final double duration = (Time.monotonicNow() - startTime)/1000.0;
       out.println("Duration is " + duration + " seconds.");
     }
     out.println("Output file: " + hexfile);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[08/50] hadoop git commit: HDFS-12472. Add JUnit timeout to TestBlockStatsMXBean. Contributed by Bharat Viswanadham.

Posted by as...@apache.org.
HDFS-12472. Add JUnit timeout to TestBlockStatsMXBean. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d7cc22a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d7cc22a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d7cc22a

Branch: refs/heads/YARN-6592
Commit: 8d7cc22ac286302960c7939bc53574cbfeab1846
Parents: 7618fa9
Author: Arpit Agarwal <ar...@apache.org>
Authored: Sat Sep 16 10:09:27 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Sat Sep 16 10:09:27 2017 -0700

----------------------------------------------------------------------
 .../hdfs/server/blockmanagement/TestBlockStatsMXBean.java       | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d7cc22a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
index bcf38d6..64364cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
@@ -41,8 +41,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.eclipse.jetty.util.ajax.JSON;
+import org.junit.rules.Timeout;
 
 /**
  * Class for testing {@link BlockStatsMXBean} implementation
@@ -51,6 +53,9 @@ public class TestBlockStatsMXBean {
 
   private MiniDFSCluster cluster;
 
+  @Rule
+  public Timeout globalTimeout = new Timeout(300000);
+
   @Before
   public void setup() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
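
Editor's note: a minimal self-contained example of the @Rule pattern the patch adds; the five-minute value mirrors the one above, everything else is illustrative:

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;

public class TimeoutRuleSketch {
  // Class-wide timeout: any test method running longer than 300 seconds
  // fails instead of hanging the whole test run.
  @Rule
  public Timeout globalTimeout = new Timeout(300000);

  @Test
  public void finishesWellWithinTheTimeout() {
    // trivially passes
  }
}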


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[03/50] hadoop git commit: YARN-7174. Add retry logic in LogsCLI when fetching running application logs. Contributed by Xuan Gong.

Posted by as...@apache.org.
YARN-7174. Add retry logic in LogsCLI when fetching running application logs. Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a84c24b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a84c24b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a84c24b

Branch: refs/heads/YARN-6592
Commit: 1a84c24b0cf6674fa755403971fa57d8e412b320
Parents: 90894c7
Author: Junping Du <ju...@apache.org>
Authored: Fri Sep 15 15:33:24 2017 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Fri Sep 15 15:33:24 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/yarn/client/cli/LogsCLI.java  | 175 +++++++++++++++-
 .../hadoop/yarn/client/cli/TestLogsCLI.java     | 205 +++++++++++++------
 2 files changed, 309 insertions(+), 71 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a84c24b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index 1a3db26..9a8ba4a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -22,6 +22,9 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.PrintStream;
+import java.net.ConnectException;
+import java.net.SocketException;
+import java.net.SocketTimeoutException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -75,9 +78,11 @@ import org.codehaus.jettison.json.JSONObject;
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientRequest;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.client.filter.ClientFilter;
 
 @Public
 @Evolving
@@ -98,14 +103,27 @@ public class LogsCLI extends Configured implements Tool {
       = "show_container_log_info";
   private static final String OUT_OPTION = "out";
   private static final String SIZE_OPTION = "size";
+  private static final String CLIENT_MAX_RETRY_OPTION = "client_max_retries";
+  private static final String CLIENT_RETRY_INTERVAL_OPTION
+      = "client_retry_interval_ms";
   public static final String HELP_CMD = "help";
+
   private PrintStream outStream = System.out;
   private YarnClient yarnClient = null;
+  private Client webServiceClient = null;
+
+  private static final int DEFAULT_MAX_RETRIES = 30;
+  private static final long DEFAULT_RETRY_INTERVAL = 1000;
+
+  @Private
+  @VisibleForTesting
+  ClientConnectionRetry connectionRetry;
 
   @Override
   public int run(String[] args) throws Exception {
     try {
       yarnClient = createYarnClient();
+      webServiceClient = Client.create();
       return runCommand(args);
     } finally {
       if (yarnClient != null) {
@@ -140,6 +158,8 @@ public class LogsCLI extends Configured implements Tool {
     List<String> amContainersList = new ArrayList<String>();
     String localDir = null;
     long bytes = Long.MAX_VALUE;
+    int maxRetries = DEFAULT_MAX_RETRIES;
+    long retryInterval = DEFAULT_RETRY_INTERVAL;
     try {
       CommandLine commandLine = parser.parse(opts, args, false);
       appIdStr = commandLine.getOptionValue(APPLICATION_ID_OPTION);
@@ -171,6 +191,14 @@ public class LogsCLI extends Configured implements Tool {
       if (commandLine.hasOption(SIZE_OPTION)) {
         bytes = Long.parseLong(commandLine.getOptionValue(SIZE_OPTION));
       }
+      if (commandLine.hasOption(CLIENT_MAX_RETRY_OPTION)) {
+        maxRetries = Integer.parseInt(commandLine.getOptionValue(
+            CLIENT_MAX_RETRY_OPTION));
+      }
+      if (commandLine.hasOption(CLIENT_RETRY_INTERVAL_OPTION)) {
+        retryInterval = Long.parseLong(commandLine.getOptionValue(
+            CLIENT_RETRY_INTERVAL_OPTION));
+      }
     } catch (ParseException e) {
       System.err.println("options parsing failed: " + e.getMessage());
       printHelpMessage(printOpts);
@@ -232,6 +260,11 @@ public class LogsCLI extends Configured implements Tool {
       }
     }
 
+    // Set up Retry WebService Client
+    connectionRetry = new ClientConnectionRetry(maxRetries, retryInterval);
+    ClientJerseyRetryFilter retryFilter = new ClientJerseyRetryFilter();
+    webServiceClient.addFilter(retryFilter);
+
     LogCLIHelpers logCliHelper = new LogCLIHelpers();
     logCliHelper.setConf(getConf());
 
@@ -342,7 +375,6 @@ public class LogsCLI extends Configured implements Tool {
   protected List<JSONObject> getAMContainerInfoForRMWebService(
       Configuration conf, String appId) throws ClientHandlerException,
       UniformInterfaceException, JSONException {
-    Client webServiceClient = Client.create();
     String webAppAddress = WebAppUtils.getRMWebAppURLWithScheme(conf);
 
     WebResource webResource = webServiceClient.resource(webAppAddress);
@@ -364,7 +396,6 @@ public class LogsCLI extends Configured implements Tool {
   private List<JSONObject> getAMContainerInfoForAHSWebService(
       Configuration conf, String appId) throws ClientHandlerException,
       UniformInterfaceException, JSONException {
-    Client webServiceClient = Client.create();
     String webAppAddress =
         WebAppUtils.getHttpSchemePrefix(conf)
             + WebAppUtils.getAHSWebAppURLWithoutScheme(conf);
@@ -417,7 +448,6 @@ public class LogsCLI extends Configured implements Tool {
       throws IOException {
     List<Pair<ContainerLogFileInfo, String>> logFileInfos
         = new ArrayList<>();
-    Client webServiceClient = Client.create();
     try {
       WebResource webResource = webServiceClient
           .resource(WebAppUtils.getHttpSchemePrefix(conf) + nodeHttpAddress);
@@ -490,7 +520,6 @@ public class LogsCLI extends Configured implements Tool {
         lastModificationTime);
   }
 
-
   @Private
   @VisibleForTesting
   public int printContainerLogsFromRunningApplication(Configuration conf,
@@ -521,7 +550,6 @@ public class LogsCLI extends Configured implements Tool {
       ContainerLogsRequest newOptions = new ContainerLogsRequest(request);
       newOptions.setLogTypes(matchedFiles);
 
-      Client webServiceClient = Client.create();
       boolean foundAnyLogs = false;
       byte[] buffer = new byte[65536];
       for (String logFile : newOptions.getLogTypes()) {
@@ -797,6 +825,10 @@ public class LogsCLI extends Configured implements Tool {
     }
   }
 
+  /**
+   * Create Command Options.
+   * @return the command options
+   */
   private Options createCommandOpts() {
     Options opts = new Options();
     opts.addOption(HELP_CMD, false, "Displays help for all commands.");
@@ -859,6 +891,13 @@ public class LogsCLI extends Configured implements Tool {
     opts.addOption(SIZE_OPTION, true, "Prints the log file's first 'n' bytes "
         + "or the last 'n' bytes. Use negative values as bytes to read from "
         + "the end and positive values as bytes to read from the beginning.");
+    opts.addOption(CLIENT_MAX_RETRY_OPTION, true, "Set max retry number for a"
+        + " retry client to get the container logs for the running "
+        + "applications. Use a negative value to make retry forever. "
+        + "The default value is 30.");
+    opts.addOption(CLIENT_RETRY_INTERVAL_OPTION, true,
+        "Work with --client_max_retries to create a retry client. "
+        + "The default value is 1000.");
     opts.getOption(APPLICATION_ID_OPTION).setArgName("Application ID");
     opts.getOption(CONTAINER_ID_OPTION).setArgName("Container ID");
     opts.getOption(NODE_ADDRESS_OPTION).setArgName("Node Address");
@@ -866,9 +905,17 @@ public class LogsCLI extends Configured implements Tool {
     opts.getOption(AM_CONTAINER_OPTION).setArgName("AM Containers");
     opts.getOption(OUT_OPTION).setArgName("Local Directory");
     opts.getOption(SIZE_OPTION).setArgName("size");
+    opts.getOption(CLIENT_MAX_RETRY_OPTION).setArgName("Max Retries");
+    opts.getOption(CLIENT_RETRY_INTERVAL_OPTION)
+        .setArgName("Retry Interval");
     return opts;
   }
 
+  /**
+   * Create print options for the help message.
+   * @param commandOpts the options
+   * @return the print options
+   */
   private Options createPrintOpts(Options commandOpts) {
     Options printOpts = new Options();
     printOpts.addOption(commandOpts.getOption(HELP_CMD));
@@ -884,6 +931,8 @@ public class LogsCLI extends Configured implements Tool {
     printOpts.addOption(commandOpts.getOption(SIZE_OPTION));
     printOpts.addOption(commandOpts.getOption(
         PER_CONTAINER_LOG_FILES_REGEX_OPTION));
+    printOpts.addOption(commandOpts.getOption(CLIENT_MAX_RETRY_OPTION));
+    printOpts.addOption(commandOpts.getOption(CLIENT_RETRY_INTERVAL_OPTION));
     return printOpts;
   }
 
@@ -1286,4 +1335,120 @@ public class LogsCLI extends Configured implements Tool {
     return nodeInfo.has("nodeHTTPAddress") ?
         nodeInfo.getString("nodeHTTPAddress") : null;
   }
+
+  // Class to handle retry
+  static class ClientConnectionRetry {
+
+    // maxRetries < 0 means keep trying
+    @Private
+    @VisibleForTesting
+    public int maxRetries;
+
+    @Private
+    @VisibleForTesting
+    public long retryInterval;
+
+    // Indicates if retries happened last time. Only tests should read it.
+    // In unit tests, retryOn() calls should _not_ be concurrent.
+    private boolean retried = false;
+
+    @Private
+    @VisibleForTesting
+    boolean getRetired() {
+      return retried;
+    }
+
+    // Constructor with default retry settings
+    public ClientConnectionRetry(int inputMaxRetries,
+        long inputRetryInterval) {
+      this.maxRetries = inputMaxRetries;
+      this.retryInterval = inputRetryInterval;
+    }
+
+    public Object retryOn(ClientRetryOp op)
+        throws RuntimeException, IOException {
+      int leftRetries = maxRetries;
+      retried = false;
+
+      // keep trying
+      while (true) {
+        try {
+          // try perform the op, if fail, keep retrying
+          return op.run();
+        } catch (IOException | RuntimeException e) {
+          // break if there's no retries left
+          if (leftRetries == 0) {
+            break;
+          }
+          if (op.shouldRetryOn(e)) {
+            logException(e, leftRetries);
+          } else {
+            throw e;
+          }
+        }
+        if (leftRetries > 0) {
+          leftRetries--;
+        }
+        retried = true;
+        try {
+          // sleep for the given time interval
+          Thread.sleep(retryInterval);
+        } catch (InterruptedException ie) {
+          System.out.println("Client retry sleep interrupted! ");
+        }
+      }
+      throw new RuntimeException("Connection retries limit exceeded.");
+    };
+
+    private void logException(Exception e, int leftRetries) {
+      if (leftRetries > 0) {
+        System.out.println("Exception caught by ClientConnectionRetry,"
+              + " will try " + leftRetries + " more time(s).\nMessage: "
+              + e.getMessage());
+      } else {
+        // note that maxRetries may be -1 at the very beginning
+        System.out.println("ConnectionException caught by ClientConnectionRetry,"
+            + " will keep retrying.\nMessage: "
+            + e.getMessage());
+      }
+    }
+  }
+
+  private class ClientJerseyRetryFilter extends ClientFilter {
+    @Override
+    public ClientResponse handle(final ClientRequest cr)
+        throws ClientHandlerException {
+      // Set up the retry operation
+      ClientRetryOp jerseyRetryOp = new ClientRetryOp() {
+        @Override
+        public Object run() {
+          // Try pass the request, if fail, keep retrying
+          return getNext().handle(cr);
+        }
+
+        @Override
+        public boolean shouldRetryOn(Exception e) {
+          // Only retry on connection exceptions
+          return (e instanceof ClientHandlerException)
+              && (e.getCause() instanceof ConnectException ||
+                  e.getCause() instanceof SocketTimeoutException ||
+                  e.getCause() instanceof SocketException);
+        }
+      };
+      try {
+        return (ClientResponse) connectionRetry.retryOn(jerseyRetryOp);
+      } catch (IOException e) {
+        throw new ClientHandlerException("Jersey retry failed!\nMessage: "
+              + e.getMessage());
+      }
+    }
+  }
+
+  // Abstract class for an operation that should be retried by client
+  private static abstract class ClientRetryOp {
+    // The operation that should be retried
+    public abstract Object run() throws IOException;
+    // The method to indicate if we should retry given the incoming exception
+    public abstract boolean shouldRetryOn(Exception e);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a84c24b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
index d58732e..fed7488 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
@@ -196,7 +196,7 @@ public class TestLogsCLI {
         "Unable to get ApplicationState"));
   }
 
-  @Test(timeout = 5000l)
+  @Test (timeout = 10000)
   public void testHelpMessage() throws Exception {
     Configuration conf = new YarnConfiguration();
     YarnClient mockYarnClient = createMockYarnClient(
@@ -207,79 +207,102 @@ public class TestLogsCLI {
 
     int exitCode = dumper.run(new String[]{});
     assertTrue(exitCode == -1);
+
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     PrintWriter pw = new PrintWriter(baos);
     pw.println("Retrieve logs for YARN applications.");
     pw.println("usage: yarn logs -applicationId <application ID> [OPTIONS]");
     pw.println();
     pw.println("general options are:");
-    pw.println(" -am <AM Containers>                     Prints the AM Container logs for");
-    pw.println("                                         this application. Specify");
-    pw.println("                                         comma-separated value to get logs");
-    pw.println("                                         for related AM Container. For");
-    pw.println("                                         example, If we specify -am 1,2,");
-    pw.println("                                         we will get the logs for the");
-    pw.println("                                         first AM Container as well as the");
-    pw.println("                                         second AM Container. To get logs");
-    pw.println("                                         for all AM Containers, use -am");
-    pw.println("                                         ALL. To get logs for the latest");
-    pw.println("                                         AM Container, use -am -1. By");
-    pw.println("                                         default, it will print all");
-    pw.println("                                         available logs. Work with");
-    pw.println("                                         -log_files to get only specific");
-    pw.println("                                         logs.");
-    pw.println(" -appOwner <Application Owner>           AppOwner (assumed to be current");
-    pw.println("                                         user if not specified)");
-    pw.println(" -containerId <Container ID>             ContainerId. By default, it will");
-    pw.println("                                         print all available logs. Work");
-    pw.println("                                         with -log_files to get only");
-    pw.println("                                         specific logs. If specified, the");
-    pw.println("                                         applicationId can be omitted");
-    pw.println(" -help                                   Displays help for all commands.");
-    pw.println(" -list_nodes                             Show the list of nodes that");
-    pw.println("                                         successfully aggregated logs.");
-    pw.println("                                         This option can only be used with");
-    pw.println("                                         finished applications.");
-    pw.println(" -log_files <Log File Name>              Specify comma-separated value to");
-    pw.println("                                         get exact matched log files. Use");
-    pw.println("                                         \"ALL\" or \"*\" to fetch all the log");
-    pw.println("                                         files for the container.");
-    pw.println(" -log_files_pattern <Log File Pattern>   Specify comma-separated value to");
-    pw.println("                                         get matched log files by using");
-    pw.println("                                         java regex. Use \".*\" to fetch all");
-    pw.println("                                         the log files for the container.");
-    pw.println(" -nodeAddress <Node Address>             NodeAddress in the format");
-    pw.println("                                         nodename:port");
-    pw.println(" -out <Local Directory>                  Local directory for storing");
-    pw.println("                                         individual container logs. The");
-    pw.println("                                         container logs will be stored");
-    pw.println("                                         based on the node the container");
-    pw.println("                                         ran on.");
-    pw.println(" -show_application_log_info              Show the containerIds which");
-    pw.println("                                         belong to the specific");
-    pw.println("                                         Application. You can combine this");
-    pw.println("                                         with --nodeAddress to get");
-    pw.println("                                         containerIds for all the");
-    pw.println("                                         containers on the specific");
-    pw.println("                                         NodeManager.");
-    pw.println(" -show_container_log_info                Show the container log metadata,");
-    pw.println("                                         including log-file names, the");
-    pw.println("                                         size of the log files. You can");
-    pw.println("                                         combine this with --containerId");
-    pw.println("                                         to get log metadata for the");
-    pw.println("                                         specific container, or with");
-    pw.println("                                         --nodeAddress to get log metadata");
-    pw.println("                                         for all the containers on the");
-    pw.println("                                         specific NodeManager.");
-    pw.println(" -size <size>                            Prints the log file's first 'n'");
-    pw.println("                                         bytes or the last 'n' bytes. Use");
-    pw.println("                                         negative values as bytes to read");
-    pw.println("                                         from the end and positive values");
-    pw.println("                                         as bytes to read from the");
-    pw.println("                                         beginning.");
+    pw.println(" -am <AM Containers>                          Prints the AM Container logs");
+    pw.println("                                              for this application.");
+    pw.println("                                              Specify comma-separated");
+    pw.println("                                              value to get logs for");
+    pw.println("                                              related AM Container. For");
+    pw.println("                                              example, If we specify -am");
+    pw.println("                                              1,2, we will get the logs");
+    pw.println("                                              for the first AM Container");
+    pw.println("                                              as well as the second AM");
+    pw.println("                                              Container. To get logs for");
+    pw.println("                                              all AM Containers, use -am");
+    pw.println("                                              ALL. To get logs for the");
+    pw.println("                                              latest AM Container, use -am");
+    pw.println("                                              -1. By default, it will");
+    pw.println("                                              print all available logs.");
+    pw.println("                                              Work with -log_files to get");
+    pw.println("                                              only specific logs.");
+    pw.println(" -appOwner <Application Owner>                AppOwner (assumed to be");
+    pw.println("                                              current user if not");
+    pw.println("                                              specified)");
+    pw.println(" -client_max_retries <Max Retries>            Set max retry number for a");
+    pw.println("                                              retry client to get the");
+    pw.println("                                              container logs for the");
+    pw.println("                                              running applications. Use a");
+    pw.println("                                              negative value to make retry");
+    pw.println("                                              forever. The default value");
+    pw.println("                                              is 30.");
+    pw.println(" -client_retry_interval_ms <Retry Interval>   Work with");
+    pw.println("                                              --client_max_retries to");
+    pw.println("                                              create a retry client. The");
+    pw.println("                                              default value is 1000.");
+    pw.println(" -containerId <Container ID>                  ContainerId. By default, it");
+    pw.println("                                              will print all available");
+    pw.println("                                              logs. Work with -log_files");
+    pw.println("                                              to get only specific logs.");
+    pw.println("                                              If specified, the");
+    pw.println("                                              applicationId can be omitted");
+    pw.println(" -help                                        Displays help for all");
+    pw.println("                                              commands.");
+    pw.println(" -list_nodes                                  Show the list of nodes that");
+    pw.println("                                              successfully aggregated");
+    pw.println("                                              logs. This option can only");
+    pw.println("                                              be used with finished");
+    pw.println("                                              applications.");
+    pw.println(" -log_files <Log File Name>                   Specify comma-separated");
+    pw.println("                                              value to get exact matched");
+    pw.println("                                              log files. Use \"ALL\" or \"*\"");
+    pw.println("                                              to fetch all the log files");
+    pw.println("                                              for the container.");
+    pw.println(" -log_files_pattern <Log File Pattern>        Specify comma-separated");
+    pw.println("                                              value to get matched log");
+    pw.println("                                              files by using java regex.");
+    pw.println("                                              Use \".*\" to fetch all the");
+    pw.println("                                              log files for the container.");
+    pw.println(" -nodeAddress <Node Address>                  NodeAddress in the format");
+    pw.println("                                              nodename:port");
+    pw.println(" -out <Local Directory>                       Local directory for storing");
+    pw.println("                                              individual container logs.");
+    pw.println("                                              The container logs will be");
+    pw.println("                                              stored based on the node the");
+    pw.println("                                              container ran on.");
+    pw.println(" -show_application_log_info                   Show the containerIds which");
+    pw.println("                                              belong to the specific");
+    pw.println("                                              Application. You can combine");
+    pw.println("                                              this with --nodeAddress to");
+    pw.println("                                              get containerIds for all the");
+    pw.println("                                              containers on the specific");
+    pw.println("                                              NodeManager.");
+    pw.println(" -show_container_log_info                     Show the container log");
+    pw.println("                                              metadata, including log-file");
+    pw.println("                                              names, the size of the log");
+    pw.println("                                              files. You can combine this");
+    pw.println("                                              with --containerId to get");
+    pw.println("                                              log metadata for the");
+    pw.println("                                              specific container, or with");
+    pw.println("                                              --nodeAddress to get log");
+    pw.println("                                              metadata for all the");
+    pw.println("                                              containers on the specific");
+    pw.println("                                              NodeManager.");
+    pw.println(" -size <size>                                 Prints the log file's first");
+    pw.println("                                              'n' bytes or the last 'n'");
+    pw.println("                                              bytes. Use negative values");
+    pw.println("                                              as bytes to read from the");
+    pw.println("                                              end and positive values as");
+    pw.println("                                              bytes to read from the");
+    pw.println("                                              beginning.");
     pw.close();
     String appReportStr = baos.toString("UTF-8");
-    Assert.assertEquals(appReportStr, sysOutStream.toString());
+    Assert.assertTrue(sysOutStream.toString().contains(appReportStr));
   }
 
   @Test (timeout = 15000)
@@ -609,6 +632,56 @@ public class TestLogsCLI {
     fs.delete(new Path(rootLogDir), true);
   }
 
+  @Test
+  public void testCheckRetryCount() throws Exception {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+
+    NodeId nodeId = NodeId.newInstance("localhost", 1234);
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId = ApplicationAttemptId
+        .newInstance(appId, 1);
+
+    // Create a mock ApplicationAttempt Report
+    ApplicationAttemptReport mockAttemptReport = mock(
+        ApplicationAttemptReport.class);
+    doReturn(appAttemptId).when(mockAttemptReport).getApplicationAttemptId();
+    List<ApplicationAttemptReport> attemptReports = Arrays.asList(
+        mockAttemptReport);
+
+    // Create one mock containerReport
+    ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 1);
+    ContainerReport mockContainerReport1 = mock(ContainerReport.class);
+    doReturn(containerId1).when(mockContainerReport1).getContainerId();
+    doReturn(nodeId).when(mockContainerReport1).getAssignedNode();
+    doReturn("http://localhost:2345").when(mockContainerReport1)
+        .getNodeHttpAddress();
+    doReturn(ContainerState.RUNNING).when(mockContainerReport1)
+        .getContainerState();
+    List<ContainerReport> containerReports = Arrays.asList(
+        mockContainerReport1);
+    // Mock the YarnClient so that it reports the previously created
+    // mockAttemptReport and mockContainerReport1
+    YarnClient mockYarnClient = createMockYarnClient(
+        YarnApplicationState.RUNNING, ugi.getShortUserName(), true,
+        attemptReports, containerReports);
+    doReturn(mockContainerReport1).when(mockYarnClient).getContainerReport(
+        any(ContainerId.class));
+    LogsCLI cli = new LogsCLIForTest(mockYarnClient);
+    cli.setConf(new YarnConfiguration());
+    try {
+      cli.run(new String[] {"-containerId",
+          containerId1.toString(), "-client_max_retries", "5"});
+      Assert.fail("Exception expected! "
+          + "NodeManager should be off to run this test. ");
+    } catch (RuntimeException ce) {
+      Assert.assertTrue(
+          "Handler exception for reason other than retry: " + ce.getMessage(),
+          ce.getMessage().contains("Connection retries limit exceeded"));
+      Assert.assertTrue("Retry filter didn't perform any retries! ", cli
+           .connectionRetry.getRetired());
+    }
+  }
+
   @Test (timeout = 5000)
   public void testGetRunningContainerLogs() throws Exception {
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
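
Editor's note: the ClientConnectionRetry/ClientJerseyRetryFilter pair above bounds how long the CLI keeps retrying a NodeManager that is not answering. A stripped-down sketch of the same bounded-retry loop; the Callable-based signature and names are illustrative, not the commit's API:

import java.io.IOException;
import java.util.concurrent.Callable;

public class BoundedRetrySketch {
  // Retries op up to maxRetries times, sleeping retryIntervalMs between
  // attempts; a negative maxRetries keeps retrying indefinitely.
  static <T> T retry(Callable<T> op, int maxRetries, long retryIntervalMs)
      throws Exception {
    int left = maxRetries;
    while (true) {
      try {
        return op.call();
      } catch (IOException | RuntimeException e) {
        if (left == 0) {
          throw new RuntimeException("Connection retries limit exceeded.", e);
        }
        if (left > 0) {
          left--;
        }
        Thread.sleep(retryIntervalMs);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    // Give up after 3 failed attempts spaced 100 ms apart.
    System.out.println(retry(() -> "ok", 3, 100));
  }
}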


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[50/50] hadoop git commit: YARN-6593. [API] Introduce Placement Constraint object. (Konstantinos Karanasos via wangda)

Posted by as...@apache.org.
YARN-6593. [API] Introduce Placement Constraint object. (Konstantinos Karanasos via wangda)

Change-Id: Id00edb7185fdf01cce6e40f920cac3585f8cbe9c


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0e16d41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0e16d41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0e16d41

Branch: refs/heads/YARN-6592
Commit: a0e16d41d0d76306eef05e267934d8c2eef15cf6
Parents: bfd1a72
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Aug 3 14:03:55 2017 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Thu Sep 21 18:51:50 2017 -0700

----------------------------------------------------------------------
 .../yarn/api/resource/PlacementConstraint.java  | 567 +++++++++++++++++++
 .../yarn/api/resource/PlacementConstraints.java | 286 ++++++++++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../src/main/proto/yarn_protos.proto            |  55 ++
 .../api/resource/TestPlacementConstraints.java  | 106 ++++
 .../PlacementConstraintFromProtoConverter.java  | 116 ++++
 .../pb/PlacementConstraintToProtoConverter.java | 174 ++++++
 .../apache/hadoop/yarn/api/pb/package-info.java |  23 +
 .../yarn/api/records/impl/pb/ProtoUtils.java    |  27 +
 .../PlacementConstraintTransformations.java     | 209 +++++++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../TestPlacementConstraintPBConversion.java    | 195 +++++++
 .../TestPlacementConstraintTransformations.java | 183 ++++++
 13 files changed, 1987 insertions(+)
----------------------------------------------------------------------
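
Editor's note: before the per-file diffs, a minimal usage sketch of the new API, built only from the constructors and methods visible in the PlacementConstraint.java diff below; the "rack" literal matches the RACK_SCOPE constant and the "zk" tag mirrors the javadoc example:

import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;

public class PlacementConstraintSketch {
  public static void main(String[] args) {
    // "Place within a rack that already has at least 2 and at most 10
    // allocations tagged 'zk'", as in the SingleConstraint javadoc.
    TargetExpression zkTag =
        new TargetExpression(TargetType.ALLOCATION_TAG, null, "zk");
    PlacementConstraint constraint =
        new SingleConstraint("rack", 2, 10, zkTag).build();
    SingleConstraint expr = (SingleConstraint) constraint.getConstraintExpr();
    System.out.println("scope=" + expr.getScope()
        + " min=" + expr.getMinCardinality()
        + " max=" + expr.getMaxCardinality());
  }
}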


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0e16d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
new file mode 100644
index 0000000..f0e3982
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -0,0 +1,567 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ * {@code PlacementConstraint} represents a placement constraint for a resource
+ * allocation.
+ */
+@Public
+@Unstable
+public class PlacementConstraint {
+
+  /**
+   * The constraint expression tree.
+   */
+  private AbstractConstraint constraintExpr;
+
+  public PlacementConstraint(AbstractConstraint constraintExpr) {
+    this.constraintExpr = constraintExpr;
+  }
+
+  /**
+   * Get the constraint expression of the placement constraint.
+   *
+   * @return the constraint expression
+   */
+  public AbstractConstraint getConstraintExpr() {
+    return constraintExpr;
+  }
+
+  /**
+   * Interface used to enable the elements of the constraint tree to be visited.
+   */
+  @Private
+  public interface Visitable {
+    /**
+     * Visitor pattern.
+     *
+     * @param visitor visitor to be used
+     * @param <T> defines the type that the visitor will use and the return type
+     *          of the accept.
+     * @return the result of visiting a given object.
+     */
+    <T> T accept(Visitor<T> visitor);
+
+  }
+
+  /**
+   * Visitor API for a constraint tree.
+   *
+   * @param <T> determines the return type of the visit methods.
+   */
+  @Private
+  public interface Visitor<T> {
+    T visit(SingleConstraint constraint);
+
+    T visit(TargetExpression target);
+
+    T visit(TargetConstraint constraint);
+
+    T visit(CardinalityConstraint constraint);
+
+    T visit(And constraint);
+
+    T visit(Or constraint);
+
+    T visit(DelayedOr constraint);
+
+    T visit(TimedPlacementConstraint constraint);
+  }
+
+  /**
+   * Abstract class that acts as the superclass of all placement constraint
+   * classes.
+   */
+  public abstract static class AbstractConstraint implements Visitable {
+    public PlacementConstraint build() {
+      return new PlacementConstraint(this);
+    }
+  }
+
+  static final String NODE_SCOPE = "node";
+  static final String RACK_SCOPE = "rack";
+
+  /**
+   * Consider a set of nodes N that belongs to the scope specified in the
+   * constraint. If the target expressions are satisfied at least minCardinality
+   * times and at most max-cardinality times in the node set N, then the
+   * constraint is satisfied.
+   *
+   * For example, a constraint of the form {@code {RACK, 2, 10,
+   * allocationTag("zk")}}, requires an allocation to be placed within a rack
+   * that has at least 2 and at most 10 other allocations with tag "zk".
+   */
+  public static class SingleConstraint extends AbstractConstraint {
+    private String scope;
+    private int minCardinality;
+    private int maxCardinality;
+    private Set<TargetExpression> targetExpressions;
+
+    public SingleConstraint(String scope, int minCardinality,
+        int maxCardinality, Set<TargetExpression> targetExpressions) {
+      this.scope = scope;
+      this.minCardinality = minCardinality;
+      this.maxCardinality = maxCardinality;
+      this.targetExpressions = targetExpressions;
+    }
+
+    public SingleConstraint(String scope, int minC, int maxC,
+        TargetExpression... targetExpressions) {
+      this(scope, minC, maxC, new HashSet<>(Arrays.asList(targetExpressions)));
+    }
+
+    /**
+     * Get the scope of the constraint.
+     *
+     * @return the scope of the constraint
+     */
+    public String getScope() {
+      return scope;
+    }
+
+    /**
+     * Get the minimum cardinality of the constraint.
+     *
+     * @return the minimum cardinality of the constraint
+     */
+    public int getMinCardinality() {
+      return minCardinality;
+    }
+
+    /**
+     * Get the maximum cardinality of the constraint.
+     *
+     * @return the maximum cardinality of the constraint
+     */
+    public int getMaxCardinality() {
+      return maxCardinality;
+    }
+
+    /**
+     * Get the target expressions of the constraint.
+     *
+     * @return the set of target expressions
+     */
+    public Set<TargetExpression> getTargetExpressions() {
+      return targetExpressions;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class representing the target expressions that are used in placement
+   * constraints. They can be expressions on node attributes or allocation
+   * tags, or self-targets (referring to the allocation to which the
+   * constraint is attached).
+   */
+  public static class TargetExpression implements Visitable {
+    /**
+     * Enum specifying the type of the target expression.
+     */
+    public enum TargetType {
+      NODE_ATTRIBUTE, ALLOCATION_TAG, SELF
+    }
+
+    private TargetType targetType;
+    private String targetKey;
+    private Set<String> targetValues;
+
+    public TargetExpression(TargetType targetType, String targetKey,
+        Set<String> targetValues) {
+      this.targetType = targetType;
+      this.targetKey = targetKey;
+      this.targetValues = targetValues;
+    }
+
+    public TargetExpression(TargetType targetType) {
+      this(targetType, null, new HashSet<>());
+    }
+
+    public TargetExpression(TargetType targetType, String targetKey,
+        String... targetValues) {
+      this(targetType, targetKey, new HashSet<>(Arrays.asList(targetValues)));
+    }
+
+    /**
+     * Get the type of the target expression.
+     *
+     * @return the type of the target expression
+     */
+    public TargetType getTargetType() {
+      return targetType;
+    }
+
+    /**
+     * Get the key of the target expression.
+     *
+     * @return the key of the target expression
+     */
+    public String getTargetKey() {
+      return targetKey;
+    }
+
+    /**
+     * Get the set of values of the target expression.
+     *
+     * @return the set of values of the target expression
+     */
+    public Set<String> getTargetValues() {
+      return targetValues;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = targetType != null ? targetType.hashCode() : 0;
+      result = 31 * result + (targetKey != null ? targetKey.hashCode() : 0);
+      result =
+          31 * result + (targetValues != null ? targetValues.hashCode() : 0);
+      return result;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null) {
+        return false;
+      }
+      if (!(o instanceof TargetExpression)) {
+        return false;
+      }
+
+      TargetExpression that = (TargetExpression) o;
+      if (targetType != that.targetType) {
+        return false;
+      }
+      if (targetKey != null ? !targetKey.equals(that.targetKey)
+          : that.targetKey != null) {
+        return false;
+      }
+      return targetValues != null ? targetValues.equals(that.targetValues)
+          : that.targetValues == null;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents a target constraint. Such a constraint requires an
+   * allocation to be placed within a scope that satisfies some specified
+   * expressions on node attributes and allocation tags.
+   *
+   * It is a specialized version of the {@link SingleConstraint}, where the
+   * minimum and the maximum cardinalities take specific values based on the
+   * {@link TargetOperator} used.
+   */
+  public static class TargetConstraint extends AbstractConstraint {
+    enum TargetOperator {
+      IN, NOT_IN
+    }
+
+    private TargetOperator op;
+    private String scope;
+    private Set<TargetExpression> targetExpressions;
+
+    public TargetConstraint(TargetOperator op, String scope,
+        Set<TargetExpression> targetExpressions) {
+      this.op = op;
+      this.scope = scope;
+      this.targetExpressions = targetExpressions;
+    }
+
+    /**
+     * Get the target operator of the constraint.
+     *
+     * @return the target operator
+     */
+    public TargetOperator getOp() {
+      return op;
+    }
+
+    /**
+     * Get the scope of the constraint.
+     *
+     * @return the scope of the constraint
+     */
+    public String getScope() {
+      return scope;
+    }
+
+    /**
+     * Get the set of target expressions.
+     *
+     * @return the set of target expressions
+     */
+    public Set<TargetExpression> getTargetExpressions() {
+      return targetExpressions;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents a cardinality constraint. Such a constraint restricts
+   * the number of allocations within a given scope to some minimum and maximum
+   * values.
+   *
+   * It is a specialized version of the {@link SingleConstraint}, where the
+   * target is self (i.e., the allocation to which the constraint is attached).
+   */
+  public static class CardinalityConstraint extends AbstractConstraint {
+    private String scope;
+    private int minCardinality;
+    private int maxCardinality;
+
+    public CardinalityConstraint(String scope, int minCardinality,
+        int maxCardinality) {
+      this.scope = scope;
+      this.minCardinality = minCardinality;
+      this.maxCardinality = maxCardinality;
+    }
+
+    /**
+     * Get the scope of the constraint.
+     *
+     * @return the scope of the constraint
+     */
+    public String getScope() {
+      return scope;
+    }
+
+    /**
+     * Get the minimum cardinality of the constraint.
+     *
+     * @return the minimum cardinality of the constraint
+     */
+    public int getMinCardinality() {
+      return minCardinality;
+    }
+
+    /**
+     * Get the maximum cardinality of the constraint.
+     *
+     * @return the maximum cardinality of the constraint
+     */
+    public int getMaxCardinality() {
+      return maxCardinality;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents composite constraints, which comprise other
+   * constraints, forming a constraint tree.
+   *
+   * @param <R> the type of constraints that are used as children of the
+   *          specific composite constraint
+   */
+  public abstract static class CompositeConstraint<R extends Visitable>
+      extends AbstractConstraint {
+
+    /**
+     * Get the children of this composite constraint.
+     *
+     * @return the children of the composite constraint
+     */
+    public abstract List<R> getChildren();
+  }
+
+  /**
+   * Class that represents a composite constraint that is a conjunction of other
+   * constraints.
+   */
+  public static class And extends CompositeConstraint<AbstractConstraint> {
+    private List<AbstractConstraint> children;
+
+    public And(List<AbstractConstraint> children) {
+      this.children = children;
+    }
+
+    public And(AbstractConstraint... children) {
+      this(Arrays.asList(children));
+    }
+
+    @Override
+    public List<AbstractConstraint> getChildren() {
+      return children;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents a composite constraint that is a disjunction of other
+   * constraints.
+   */
+  public static class Or extends CompositeConstraint<AbstractConstraint> {
+    private List<AbstractConstraint> children;
+
+    public Or(List<AbstractConstraint> children) {
+      this.children = children;
+    }
+
+    public Or(AbstractConstraint... children) {
+      this(Arrays.asList(children));
+    }
+
+    @Override
+    public List<AbstractConstraint> getChildren() {
+      return children;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents a composite constraint that comprises a list of timed
+   * placement constraints (see {@link TimedPlacementConstraint}). The scheduler
+   * should first try to satisfy the first timed child constraint within the
+   * specified time window. If this is not possible, it should attempt to
+   * satisfy the second, and so on.
+   */
+  public static class DelayedOr
+      extends CompositeConstraint<TimedPlacementConstraint> {
+    private List<TimedPlacementConstraint> children = new ArrayList<>();
+
+    public DelayedOr(List<TimedPlacementConstraint> children) {
+      this.children = children;
+    }
+
+    public DelayedOr(TimedPlacementConstraint... children) {
+      this(Arrays.asList(children));
+    }
+
+    @Override
+    public List<TimedPlacementConstraint> getChildren() {
+      return children;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Represents a timed placement constraint that has to be satisfied within a
+   * time window.
+   */
+  public static class TimedPlacementConstraint implements Visitable {
+    /**
+     * The unit of scheduling delay.
+     */
+    public enum DelayUnit {
+      MILLISECONDS, OPPORTUNITIES
+    }
+
+    private AbstractConstraint constraint;
+    private long schedulingDelay;
+    private DelayUnit delayUnit;
+
+    public TimedPlacementConstraint(AbstractConstraint constraint,
+        long schedulingDelay, DelayUnit delayUnit) {
+      this.constraint = constraint;
+      this.schedulingDelay = schedulingDelay;
+      this.delayUnit = delayUnit;
+    }
+
+    public TimedPlacementConstraint(AbstractConstraint constraint,
+        long schedulingDelay) {
+      this(constraint, schedulingDelay, DelayUnit.MILLISECONDS);
+    }
+
+    public TimedPlacementConstraint(AbstractConstraint constraint) {
+      this(constraint, Long.MAX_VALUE, DelayUnit.MILLISECONDS);
+    }
+
+    /**
+     * Get the constraint that has to be satisfied within the time window.
+     *
+     * @return the constraint to be satisfied
+     */
+    public AbstractConstraint getConstraint() {
+      return constraint;
+    }
+
+    /**
+     * Sets the constraint that has to be satisfied within the time window.
+     *
+     * @param constraint the constraint to be satisfied
+     */
+    public void setConstraint(AbstractConstraint constraint) {
+      this.constraint = constraint;
+    }
+
+    /**
+     * Get the scheduling delay value that determines the time window within
+     * which the constraint has to be satisfied.
+     *
+     * @return the value of the scheduling delay
+     */
+    public long getSchedulingDelay() {
+      return schedulingDelay;
+    }
+
+    /**
+     * The unit of the scheduling delay.
+     *
+     * @return the unit of the delay
+     */
+    public DelayUnit getDelayUnit() {
+      return delayUnit;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+}
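
Not part of the commit, but a minimal sketch of how the classes above compose into a constraint tree: affinity to an existing "hbase-m" allocation on the same node, combined with a cap of 3 of our own allocations per node. The class name, scope string, and tag values below are placeholders; applications would normally go through the PlacementConstraints factory methods added in the next file.

import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;

public class ConstraintTreeSketch {
  public static void main(String[] args) {
    // Target expressions: an allocation tag and the self-target.
    TargetExpression hbaseMaster =
        new TargetExpression(TargetType.ALLOCATION_TAG, null, "hbase-m");
    TargetExpression self = new TargetExpression(TargetType.SELF);

    // Affinity: at least one "hbase-m" allocation on the node.
    SingleConstraint affinity =
        new SingleConstraint("node", 1, Integer.MAX_VALUE, hbaseMaster);
    // Cardinality: at most 3 of our own allocations per node.
    SingleConstraint cardinality = new SingleConstraint("node", 0, 3, self);

    // Conjunction of the two, wrapped into a PlacementConstraint.
    PlacementConstraint constraint = new And(affinity, cardinality).build();
    System.out.println(constraint.getConstraintExpr());
  }
}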

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0e16d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
new file mode 100644
index 0000000..8e84280
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
@@ -0,0 +1,286 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
+
+/**
+ * This class contains various static methods for the applications to create
+ * placement constraints (see also {@link PlacementConstraint}).
+ */
+@Public
+@Unstable
+public final class PlacementConstraints {
+
+  // Suppresses default constructor, ensuring non-instantiability.
+  private PlacementConstraints() {
+  }
+
+  // Creation of simple constraints.
+
+  public static final String NODE = PlacementConstraint.NODE_SCOPE;
+  public static final String RACK = PlacementConstraint.RACK_SCOPE;
+
+  /**
+   * Creates a constraint that requires allocations to be placed on nodes that
+   * satisfy all target expressions within the given scope (e.g., node or rack).
+   *
+   * For example, {@code targetIn(RACK, allocationTag("hbase-m"))} allows
+   * allocations on nodes that belong to a rack that has at least one allocation
+   * with the tag "hbase-m".
+   *
+   * @param scope the scope within which the target expressions should be
+   *          satisfied
+   * @param targetExpressions the expressions that need to be satisfied within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint targetIn(String scope,
+      TargetExpression... targetExpressions) {
+    return new SingleConstraint(scope, 1, Integer.MAX_VALUE, targetExpressions);
+  }
+
+  /**
+   * Creates a constraint that requires allocations to be placed on nodes that
+   * belong to a scope (e.g., node or rack) that does not satisfy any of the
+   * target expressions.
+   *
+   * @param scope the scope within which the target expressions should not be
+   *          satisfied
+   * @param targetExpressions the expressions that must not be satisfied within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint targetNotIn(String scope,
+      TargetExpression... targetExpressions) {
+    return new SingleConstraint(scope, 0, 0, targetExpressions);
+  }
+
+  /**
+   * Creates a constraint that restricts the number of allocations within a
+   * given scope (e.g., node or rack).
+   *
+   * For example, {@code cardinality(NODE, 3, 10)} restricts the number of
+   * allocations per node to be no less than 3 and no more than 10.
+   *
+   * @param scope the scope of the constraint
+   * @param minCardinality determines the minimum number of allocations within
+   *          the scope
+   * @param maxCardinality determines the maximum number of allocations within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint cardinality(String scope, int minCardinality,
+      int maxCardinality) {
+    return new SingleConstraint(scope, minCardinality, maxCardinality,
+        PlacementTargets.self());
+  }
+
+  /**
+   * Similar to {@link #cardinality(String, int, int)}, but determines only the
+   * minimum cardinality (the maximum cardinality is unbounded).
+   *
+   * @param scope the scope of the constraint
+   * @param minCardinality determines the minimum number of allocations within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint minCardinality(String scope,
+      int minCardinality) {
+    return cardinality(scope, minCardinality, Integer.MAX_VALUE);
+  }
+
+  /**
+   * Similar to {@link #cardinality(String, int, int)}, but determines only the
+   * maximum cardinality (the minimum can be as low as 0).
+   *
+   * @param scope the scope of the constraint
+   * @param maxCardinality determines the maximum number of allocations within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint maxCardinality(String scope,
+      int maxCardinality) {
+    return cardinality(scope, 0, maxCardinality);
+  }
+
+  /**
+   * This constraint generalizes the cardinality and target constraints.
+   *
+   * Consider a set of nodes N that belong to the scope specified in the
+   * constraint. If the target expressions are satisfied at least minCardinality
+   * times and at most maxCardinality times in the node set N, then the
+   * constraint is satisfied.
+   *
+   * For example, {@code targetCardinality(RACK, 2, 10, allocationTag("zk"))}
+   * requires an allocation to be placed within a rack that has at least 2 and
+   * at most 10 other allocations with tag "zk".
+   *
+   * @param scope the scope of the constraint
+   * @param minCardinality the minimum number of times the target expressions
+   *          have to be satisfied within the given scope
+   * @param maxCardinality the maximum number of times the target expressions
+   *          have to be satisfied within the given scope
+   * @param targetExpressions the target expressions
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint targetCardinality(String scope,
+      int minCardinality, int maxCardinality,
+      TargetExpression... targetExpressions) {
+    return new SingleConstraint(scope, minCardinality, maxCardinality,
+        targetExpressions);
+  }
+
+  // Creation of target expressions to be used in simple constraints.
+
+  /**
+   * Class with static methods for constructing target expressions to be used in
+   * placement constraints.
+   */
+  public static class PlacementTargets {
+
+    /**
+     * Constructs a target expression on a node attribute. It is satisfied if
+     * the specified node attribute has one of the specified values.
+     *
+     * @param attributeKey the name of the node attribute
+     * @param attributeValues the set of values that the attribute should take
+     *          values from
+     * @return the resulting expression on the node attribute
+     */
+    public static TargetExpression nodeAttribute(String attributeKey,
+        String... attributeValues) {
+      return new TargetExpression(TargetType.NODE_ATTRIBUTE, attributeKey,
+          attributeValues);
+    }
+
+    /**
+     * Constructs a target expression on an allocation tag. It is satisfied if
+     * there are allocations with one of the given tags.
+     *
+     * @param allocationTags the set of tags that the matched allocations
+     *          should be tagged with
+     * @return the resulting expression on the allocation tags
+     */
+    public static TargetExpression allocationTag(String... allocationTags) {
+      return new TargetExpression(TargetType.ALLOCATION_TAG, null,
+          allocationTags);
+    }
+
+    /**
+     * Constructs the default target expression, which refers to the allocation
+     * that specifies the constraint (a self-target).
+     *
+     * @return the self-target
+     */
+    public static TargetExpression self() {
+      return new TargetExpression(TargetType.SELF);
+    }
+  }
+
+  // Creation of compound constraints.
+
+  /**
+   * A conjunction of constraints.
+   *
+   * @param children the children constraints that should all be satisfied
+   * @return the resulting placement constraint
+   */
+  public static And and(AbstractConstraint... children) {
+    return new And(children);
+  }
+
+  /**
+   * A disjunction of constraints.
+   *
+   * @param children the children constraints, one of which should be satisfied
+   * @return the resulting placement constraint
+   */
+  public static Or or(AbstractConstraint... children) {
+    return new Or(children);
+  }
+
+  /**
+   * Creates a composite constraint that includes a list of timed placement
+   * constraints. The scheduler should first try to satisfy the first timed
+   * child constraint within its specified time window. If this is not possible,
+   * it should attempt to satisfy the second, and so on.
+   *
+   * @param children the timed children constraints
+   * @return the resulting composite constraint
+   */
+  public static DelayedOr delayedOr(TimedPlacementConstraint... children) {
+    return new DelayedOr(children);
+  }
+
+  // Creation of timed constraints to be used in a DELAYED_OR constraint.
+
+  /**
+   * Creates a placement constraint that has to be satisfied within a time
+   * window.
+   *
+   * @param constraint the placement constraint
+   * @param delay the length of the time window within which the constraint has
+   *          to be satisfied
+   * @param timeUnit the unit of time of the time window
+   * @return the resulting timed placement constraint
+   */
+  public static TimedPlacementConstraint timedClockConstraint(
+      AbstractConstraint constraint, long delay, TimeUnit timeUnit) {
+    return new TimedPlacementConstraint(constraint, timeUnit.toMillis(delay),
+        TimedPlacementConstraint.DelayUnit.MILLISECONDS);
+  }
+
+  /**
+   * Creates a placement constraint that has to be satisfied within a number of
+   * placement opportunities (invocations of the scheduler).
+   *
+   * @param constraint the placement constraint
+   * @param delay the number of scheduling opportunities within which the
+   *          constraint has to be satisfied
+   * @return the resulting timed placement constraint
+   */
+  public static TimedPlacementConstraint timedOpportunitiesConstraint(
+      AbstractConstraint constraint, long delay) {
+    return new TimedPlacementConstraint(constraint, delay,
+        TimedPlacementConstraint.DelayUnit.OPPORTUNITIES);
+  }
+
+  /**
+   * Creates a {@link PlacementConstraint} given a constraint expression.
+   *
+   * @param constraintExpr the constraint expression
+   * @return the placement constraint
+   */
+  public static PlacementConstraint build(AbstractConstraint constraintExpr) {
+    return constraintExpr.build();
+  }
+
+}
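
As a usage sketch (not part of the commit), the factory methods above can be combined as follows. The tag and attribute values are placeholders, and the same calls are exercised by the unit test added further down in this patch.

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.and;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.nodeAttribute;

import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;

public class PlacementConstraintsSketch {
  public static void main(String[] args) {
    PlacementConstraint constraint = PlacementConstraints.build(
        and(
            // Stay on racks that already run at least one "spark" allocation.
            targetIn(RACK, allocationTag("spark")),
            // Avoid nodes labeled with attribute java=1.7.
            targetNotIn(NODE, nodeAttribute("java", "1.7")),
            // No more than 3 of our allocations per node.
            maxCardinality(NODE, 3)));
    System.out.println(constraint.getConstraintExpr());
  }
}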

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0e16d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
new file mode 100644
index 0000000..660dc02
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * API related to resources.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.api.resource;
+import org.apache.hadoop.classification.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0e16d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index fb340d1..afd5fa5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -537,6 +537,61 @@ enum SignalContainerCommandProto {
   FORCEFUL_SHUTDOWN = 3;
 }
 
+////////////////////////////////////////////////////////////////////////
+////// Placement constraints ///////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+
+message PlacementConstraintProto {
+  optional SimplePlacementConstraintProto simpleConstraint = 1;
+  optional CompositePlacementConstraintProto compositeConstraint = 2;
+}
+
+message SimplePlacementConstraintProto {
+  required string scope = 1;
+  repeated PlacementConstraintTargetProto targetExpressions = 2;
+  optional int32 minCardinality = 3;
+  optional int32 maxCardinality = 4;
+}
+
+message PlacementConstraintTargetProto {
+  enum TargetType {
+    NODE_ATTRIBUTE = 1;
+    ALLOCATION_TAG = 2;
+    SELF = 3;
+  }
+
+  required TargetType targetType = 1;
+  optional string targetKey = 2;
+  repeated string targetValues = 3;
+}
+
+message TimedPlacementConstraintProto {
+  enum DelayUnit {
+    MILLISECONDS = 1;
+    OPPORTUNITIES = 2;
+  }
+
+  required PlacementConstraintProto placementConstraint = 1;
+  required int64 schedulingDelay = 2;
+  optional DelayUnit delayUnit = 3 [ default = MILLISECONDS ];
+}
+
+message CompositePlacementConstraintProto {
+  enum CompositeType {
+    // All children constraints have to be satisfied.
+    AND = 1;
+    // One of the children constraints has to be satisfied.
+    OR = 2;
+    // Attempt to satisfy the first child constraint for delays[0] units (e.g.,
+    // millisec or heartbeats). If this fails, try to satisfy the second child
+    // constraint for delays[1] units and so on.
+    DELAYED_OR = 3;
+  }
+
+  required CompositeType compositeType = 1;
+  repeated PlacementConstraintProto childConstraints = 2;
+  repeated TimedPlacementConstraintProto timedChildConstraints = 3;
+}
 
 ////////////////////////////////////////////////////////////////////////
 ////// From reservation_protocol /////////////////////////////////////
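
For reference, a hand-built PlacementConstraintProto for the "2 to 10 'zk' allocations per rack" example from the Javadoc might look like the sketch below. This is not part of the commit; it only uses generated builder methods that the converters added later in this patch also rely on, and the class name and values are placeholders.

import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;

public class ProtoBuilderSketch {
  public static void main(String[] args) {
    // scope="rack", min=2, max=10, one ALLOCATION_TAG target with value "zk".
    PlacementConstraintProto proto = PlacementConstraintProto.newBuilder()
        .setSimpleConstraint(SimplePlacementConstraintProto.newBuilder()
            .setScope("rack")
            .setMinCardinality(2)
            .setMaxCardinality(10)
            .addTargetExpressions(PlacementConstraintTargetProto.newBuilder()
                .setTargetType(
                    PlacementConstraintTargetProto.TargetType.ALLOCATION_TAG)
                .addTargetValues("zk")))
        .build();
    System.out.println(proto);
  }
}
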

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0e16d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java
new file mode 100644
index 0000000..e25d477
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.and;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.nodeAttribute;
+
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test class for the various static methods in
+ * {@link org.apache.hadoop.yarn.api.resource.PlacementConstraints}.
+ */
+public class TestPlacementConstraints {
+
+  @Test
+  public void testNodeAffinityToTag() {
+    AbstractConstraint constraintExpr =
+        targetIn(NODE, allocationTag("hbase-m"));
+
+    SingleConstraint sConstraint = (SingleConstraint) constraintExpr;
+    Assert.assertEquals(NODE, sConstraint.getScope());
+    Assert.assertEquals(1, sConstraint.getMinCardinality());
+    Assert.assertEquals(Integer.MAX_VALUE, sConstraint.getMaxCardinality());
+
+    Assert.assertEquals(1, sConstraint.getTargetExpressions().size());
+    TargetExpression tExpr =
+        sConstraint.getTargetExpressions().iterator().next();
+    Assert.assertNull(tExpr.getTargetKey());
+    Assert.assertEquals(TargetType.ALLOCATION_TAG, tExpr.getTargetType());
+    Assert.assertEquals(1, tExpr.getTargetValues().size());
+    Assert.assertEquals("hbase-m", tExpr.getTargetValues().iterator().next());
+
+    PlacementConstraint constraint = PlacementConstraints.build(constraintExpr);
+    Assert.assertNotNull(constraint.getConstraintExpr());
+  }
+
+  @Test
+  public void testNodeAntiAffinityToAttribute() {
+    AbstractConstraint constraintExpr =
+        targetNotIn(NODE, nodeAttribute("java", "1.8"));
+
+    SingleConstraint sConstraint = (SingleConstraint) constraintExpr;
+    Assert.assertEquals(NODE, sConstraint.getScope());
+    Assert.assertEquals(0, sConstraint.getMinCardinality());
+    Assert.assertEquals(0, sConstraint.getMaxCardinality());
+
+    Assert.assertEquals(1, sConstraint.getTargetExpressions().size());
+    TargetExpression tExpr =
+        sConstraint.getTargetExpressions().iterator().next();
+    Assert.assertEquals("java", tExpr.getTargetKey());
+    Assert.assertEquals(TargetType.NODE_ATTRIBUTE, tExpr.getTargetType());
+    Assert.assertEquals(1, tExpr.getTargetValues().size());
+    Assert.assertEquals("1.8", tExpr.getTargetValues().iterator().next());
+  }
+
+  @Test
+  public void testAndConstraint() {
+    AbstractConstraint constraintExpr =
+        and(targetIn(RACK, allocationTag("spark")), maxCardinality(NODE, 3),
+            targetCardinality(RACK, 2, 10, allocationTag("zk")));
+
+    And andExpr = (And) constraintExpr;
+    Assert.assertEquals(3, andExpr.getChildren().size());
+    SingleConstraint sConstr = (SingleConstraint) andExpr.getChildren().get(0);
+    TargetExpression tExpr = sConstr.getTargetExpressions().iterator().next();
+    Assert.assertEquals("spark", tExpr.getTargetValues().iterator().next());
+
+    sConstr = (SingleConstraint) andExpr.getChildren().get(1);
+    Assert.assertEquals(0, sConstr.getMinCardinality());
+    Assert.assertEquals(3, sConstr.getMaxCardinality());
+
+    sConstr = (SingleConstraint) andExpr.getChildren().get(2);
+    Assert.assertEquals(2, sConstr.getMinCardinality());
+    Assert.assertEquals(10, sConstr.getMaxCardinality());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0e16d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java
new file mode 100644
index 0000000..926b6fa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.pb;
+
+import static org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto.CompositeType.AND;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.TimedPlacementConstraintProto;
+
+/**
+ * {@code PlacementConstraintFromProtoConverter} generates an
+ * {@link PlacementConstraint.AbstractConstraint} given a
+ * {@link PlacementConstraintProto}.
+ */
+@Private
+public class PlacementConstraintFromProtoConverter {
+
+  private PlacementConstraintProto constraintProto;
+
+  public PlacementConstraintFromProtoConverter(
+      PlacementConstraintProto constraintProto) {
+    this.constraintProto = constraintProto;
+  }
+
+  public PlacementConstraint convert() {
+    return new PlacementConstraint(convert(constraintProto));
+  }
+
+  private AbstractConstraint convert(PlacementConstraintProto proto) {
+    return proto.hasSimpleConstraint() ? convert(proto.getSimpleConstraint())
+        : convert(proto.getCompositeConstraint());
+  }
+
+  private SingleConstraint convert(SimplePlacementConstraintProto proto) {
+    Set<TargetExpression> targets = new HashSet<>();
+    for (PlacementConstraintTargetProto tp : proto.getTargetExpressionsList()) {
+      targets.add(convert(tp));
+    }
+
+    return new SingleConstraint(proto.getScope(), proto.getMinCardinality(),
+        proto.getMaxCardinality(), targets);
+  }
+
+  private TargetExpression convert(PlacementConstraintTargetProto proto) {
+    return new TargetExpression(
+        ProtoUtils.convertFromProtoFormat(proto.getTargetType()),
+        proto.hasTargetKey() ? proto.getTargetKey() : null,
+        new HashSet<>(proto.getTargetValuesList()));
+  }
+
+  private AbstractConstraint convert(CompositePlacementConstraintProto proto) {
+    switch (proto.getCompositeType()) {
+    case AND:
+    case OR:
+      List<AbstractConstraint> children = new ArrayList<>();
+      for (PlacementConstraintProto cp : proto.getChildConstraintsList()) {
+        children.add(convert(cp));
+      }
+      return (proto.getCompositeType() == AND) ? new And(children)
+          : new Or(children);
+    case DELAYED_OR:
+      List<TimedPlacementConstraint> tChildren = new ArrayList<>();
+      for (TimedPlacementConstraintProto cp : proto
+          .getTimedChildConstraintsList()) {
+        tChildren.add(convert(cp));
+      }
+      return new DelayedOr(tChildren);
+    default:
+      throw new YarnRuntimeException(
+          "Encountered unexpected type of composite constraint.");
+    }
+  }
+
+  private TimedPlacementConstraint convert(
+      TimedPlacementConstraintProto proto) {
+    AbstractConstraint pConstraint = convert(proto.getPlacementConstraint());
+
+    return new TimedPlacementConstraint(pConstraint, proto.getSchedulingDelay(),
+        ProtoUtils.convertFromProtoFormat(proto.getDelayUnit()));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0e16d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java
new file mode 100644
index 0000000..7816e18
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CardinalityConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CompositeConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto.CompositeType;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.TimedPlacementConstraintProto;
+
+import com.google.protobuf.GeneratedMessage;
+
+/**
+ * {@code PlacementConstraintToProtoConverter} generates a
+ * {@link PlacementConstraintProto} given a
+ * {@link PlacementConstraint.AbstractConstraint}.
+ */
+@Private
+public class PlacementConstraintToProtoConverter
+    implements PlacementConstraint.Visitor<GeneratedMessage> {
+
+  private PlacementConstraint placementConstraint;
+
+  public PlacementConstraintToProtoConverter(
+      PlacementConstraint placementConstraint) {
+    this.placementConstraint = placementConstraint;
+  }
+
+  public PlacementConstraintProto convert() {
+    return (PlacementConstraintProto) placementConstraint.getConstraintExpr()
+        .accept(this);
+  }
+
+  @Override
+  public GeneratedMessage visit(SingleConstraint constraint) {
+    SimplePlacementConstraintProto.Builder sb =
+        SimplePlacementConstraintProto.newBuilder();
+
+    if (constraint.getScope() != null) {
+      sb.setScope(constraint.getScope());
+    }
+    sb.setMinCardinality(constraint.getMinCardinality());
+    sb.setMaxCardinality(constraint.getMaxCardinality());
+    if (constraint.getTargetExpressions() != null) {
+      for (TargetExpression target : constraint.getTargetExpressions()) {
+        sb.addTargetExpressions(
+            (PlacementConstraintTargetProto) target.accept(this));
+      }
+
+    }
+    SimplePlacementConstraintProto sProto = sb.build();
+
+    // Wrap around PlacementConstraintProto object.
+    PlacementConstraintProto.Builder pb = PlacementConstraintProto.newBuilder();
+    pb.setSimpleConstraint(sProto);
+    return pb.build();
+  }
+
+  @Override
+  public GeneratedMessage visit(TargetExpression target) {
+    PlacementConstraintTargetProto.Builder tb =
+        PlacementConstraintTargetProto.newBuilder();
+
+    tb.setTargetType(ProtoUtils.convertToProtoFormat(target.getTargetType()));
+    if (target.getTargetKey() != null) {
+      tb.setTargetKey(target.getTargetKey());
+    }
+    if (target.getTargetValues() != null) {
+      tb.addAllTargetValues(target.getTargetValues());
+    }
+    return tb.build();
+  }
+
+  @Override
+  public GeneratedMessage visit(TargetConstraint constraint) {
+    throw new YarnRuntimeException("Unexpected TargetConstraint found.");
+  }
+
+  @Override
+  public GeneratedMessage visit(CardinalityConstraint constraint) {
+    throw new YarnRuntimeException("Unexpected CardinalityConstraint found.");
+  }
+
+  private GeneratedMessage visitAndOr(
+      CompositeConstraint<AbstractConstraint> composite, CompositeType type) {
+    CompositePlacementConstraintProto.Builder cb =
+        CompositePlacementConstraintProto.newBuilder();
+
+    cb.setCompositeType(type);
+
+    for (AbstractConstraint c : composite.getChildren()) {
+      cb.addChildConstraints((PlacementConstraintProto) c.accept(this));
+    }
+    CompositePlacementConstraintProto cProto = cb.build();
+
+    // Wrap around PlacementConstraintProto object.
+    PlacementConstraintProto.Builder pb = PlacementConstraintProto.newBuilder();
+    pb.setCompositeConstraint(cProto);
+    return pb.build();
+  }
+
+  @Override
+  public GeneratedMessage visit(And constraint) {
+    return visitAndOr(constraint, CompositeType.AND);
+  }
+
+  @Override
+  public GeneratedMessage visit(Or constraint) {
+    return visitAndOr(constraint, CompositeType.OR);
+  }
+
+  @Override
+  public GeneratedMessage visit(DelayedOr constraint) {
+    CompositePlacementConstraintProto.Builder cb =
+        CompositePlacementConstraintProto.newBuilder();
+
+    cb.setCompositeType(CompositeType.DELAYED_OR);
+
+    for (TimedPlacementConstraint c : constraint.getChildren()) {
+      cb.addTimedChildConstraints(
+          (TimedPlacementConstraintProto) c.accept(this));
+    }
+    CompositePlacementConstraintProto cProto = cb.build();
+
+    // Wrap around PlacementConstraintProto object.
+    PlacementConstraintProto.Builder pb = PlacementConstraintProto.newBuilder();
+    pb.setCompositeConstraint(cProto);
+    return pb.build();
+  }
+
+  @Override
+  public GeneratedMessage visit(TimedPlacementConstraint constraint) {
+    TimedPlacementConstraintProto.Builder tb =
+        TimedPlacementConstraintProto.newBuilder();
+
+    tb.setDelayUnit(ProtoUtils.convertToProtoFormat(constraint.getDelayUnit()));
+    tb.setSchedulingDelay(constraint.getSchedulingDelay());
+    tb.setPlacementConstraint(
+        (PlacementConstraintProto) constraint.getConstraint().accept(this));
+
+    return tb.build();
+  }
+}
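
A round-trip sketch (not part of the commit) tying the two converters together: build a constraint with the PlacementConstraints API, serialize it with PlacementConstraintToProtoConverter, and rebuild it with PlacementConstraintFromProtoConverter. The class name and tag value are placeholders.

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

import org.apache.hadoop.yarn.api.pb.PlacementConstraintFromProtoConverter;
import org.apache.hadoop.yarn.api.pb.PlacementConstraintToProtoConverter;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;

public class ConverterRoundTripSketch {
  public static void main(String[] args) {
    // The "2 to 10 'zk' allocations per rack" constraint from the Javadoc.
    PlacementConstraint original = PlacementConstraints.build(
        targetCardinality(RACK, 2, 10, allocationTag("zk")));

    // API object -> protobuf.
    PlacementConstraintProto proto =
        new PlacementConstraintToProtoConverter(original).convert();

    // Protobuf -> API object.
    PlacementConstraint restored =
        new PlacementConstraintFromProtoConverter(proto).convert();

    System.out.println(proto);
    System.out.println(restored.getConstraintExpr());
  }
}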

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0e16d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/package-info.java
new file mode 100644
index 0000000..18da80f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * API related to protobuf objects that are not backed by PBImpl classes.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.api.pb;
+import org.apache.hadoop.classification.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0e16d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
index 158c2ae..334ed94 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
@@ -54,6 +54,8 @@ import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
 import org.apache.hadoop.yarn.proto.YarnProtos;
 import org.apache.hadoop.yarn.proto.YarnProtos.AMCommandProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAccessTypeProto;
@@ -67,10 +69,12 @@ import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceVisibilityProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.LogAggregationStatusProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeStateProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.QueueACLProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.QueueStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ReservationRequestInterpreterProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.TimedPlacementConstraintProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationAttemptStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerRetryPolicyProto;
@@ -479,6 +483,29 @@ public class ProtoUtils {
     }
     return ret;
   }
+
+  public static PlacementConstraintTargetProto.TargetType convertToProtoFormat(
+          TargetExpression.TargetType t) {
+    return PlacementConstraintTargetProto.TargetType.valueOf(t.name());
+  }
+
+  public static TargetExpression.TargetType convertFromProtoFormat(
+          PlacementConstraintTargetProto.TargetType t) {
+    return TargetExpression.TargetType.valueOf(t.name());
+  }
+
+  /*
+   * TimedPlacementConstraint.DelayUnit
+   */
+  public static TimedPlacementConstraintProto.DelayUnit convertToProtoFormat(
+          TimedPlacementConstraint.DelayUnit u) {
+    return TimedPlacementConstraintProto.DelayUnit.valueOf(u.name());
+  }
+
+  public static TimedPlacementConstraint.DelayUnit convertFromProtoFormat(
+          TimedPlacementConstraintProto.DelayUnit u) {
+    return TimedPlacementConstraint.DelayUnit.valueOf(u.name());
+  }
 }
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0e16d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
new file mode 100644
index 0000000..e9eda6f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
@@ -0,0 +1,209 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import java.util.ListIterator;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CardinalityConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CompositeConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint.TargetOperator;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+
+/**
+ * This class contains inner classes that define transformations on a
+ * {@link PlacementConstraint} expression.
+ */
+@Private
+public class PlacementConstraintTransformations {
+
+  /**
+   * The default implementation of the {@link PlacementConstraint.Visitor} that
+   * does a traversal of the constraint tree, performing no action for the leaf
+   * constraints.
+   */
+  public static class AbstractTransformer
+      implements PlacementConstraint.Visitor<AbstractConstraint> {
+
+    private PlacementConstraint placementConstraint;
+
+    public AbstractTransformer(PlacementConstraint placementConstraint) {
+      this.placementConstraint = placementConstraint;
+    }
+
+    /**
+     * This method performs the transformation of the
+     * {@link #placementConstraint}.
+     *
+     * @return the transformed placement constraint.
+     */
+    public PlacementConstraint transform() {
+      AbstractConstraint constraintExpr =
+          placementConstraint.getConstraintExpr();
+
+      // Visit the constraint tree to perform the transformation.
+      constraintExpr = constraintExpr.accept(this);
+
+      return new PlacementConstraint(constraintExpr);
+    }
+
+    @Override
+    public AbstractConstraint visit(SingleConstraint constraint) {
+      // Do nothing.
+      return constraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(TargetExpression expression) {
+      // Do nothing.
+      return null;
+    }
+
+    @Override
+    public AbstractConstraint visit(TargetConstraint constraint) {
+      // Do nothing.
+      return constraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(CardinalityConstraint constraint) {
+      // Do nothing.
+      return constraint;
+    }
+
+    private AbstractConstraint visitAndOr(
+        CompositeConstraint<AbstractConstraint> constraint) {
+      for (ListIterator<AbstractConstraint> iter =
+          constraint.getChildren().listIterator(); iter.hasNext();) {
+        AbstractConstraint child = iter.next();
+        child = child.accept(this);
+        iter.set(child);
+      }
+      return constraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(And constraint) {
+      return visitAndOr(constraint);
+    }
+
+    @Override
+    public AbstractConstraint visit(Or constraint) {
+      return visitAndOr(constraint);
+    }
+
+    @Override
+    public AbstractConstraint visit(DelayedOr constraint) {
+      constraint.getChildren().forEach(
+          child -> child.setConstraint(child.getConstraint().accept(this)));
+      return constraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(TimedPlacementConstraint constraint) {
+      // Do nothing.
+      return null;
+    }
+  }
+
+  /**
+   * Visits a {@link PlacementConstraint} tree and substitutes each
+   * {@link TargetConstraint} and {@link CardinalityConstraint} with an
+   * equivalent {@link SingleConstraint}.
+   */
+  public static class SingleConstraintTransformer extends AbstractTransformer {
+
+    public SingleConstraintTransformer(PlacementConstraint constraint) {
+      super(constraint);
+    }
+
+    @Override
+    public AbstractConstraint visit(TargetConstraint constraint) {
+      AbstractConstraint newConstraint;
+      if (constraint.getOp() == TargetOperator.IN) {
+        newConstraint = new SingleConstraint(constraint.getScope(), 1,
+            Integer.MAX_VALUE, constraint.getTargetExpressions());
+      } else if (constraint.getOp() == TargetOperator.NOT_IN) {
+        newConstraint = new SingleConstraint(constraint.getScope(), 0, 0,
+            constraint.getTargetExpressions());
+      } else {
+        throw new YarnRuntimeException(
+            "Encountered unexpected type of constraint target operator: "
+                + constraint.getOp());
+      }
+      return newConstraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(CardinalityConstraint constraint) {
+      return new SingleConstraint(constraint.getScope(),
+          constraint.getMinCardinality(), constraint.getMaxCardinality(),
+          new TargetExpression(TargetExpression.TargetType.SELF));
+    }
+  }
+
+  /**
+   * Visits a {@link PlacementConstraint} tree and, whenever possible,
+   * substitutes each {@link SingleConstraint} with a {@link TargetConstraint}
+   * or a {@link CardinalityConstraint}. When such a substitution is not
+   * possible, we keep the original {@link SingleConstraint}.
+   */
+  public static class SpecializedConstraintTransformer
+      extends AbstractTransformer {
+
+    public SpecializedConstraintTransformer(PlacementConstraint constraint) {
+      super(constraint);
+    }
+
+    @Override
+    public AbstractConstraint visit(SingleConstraint constraint) {
+      AbstractConstraint transformedConstraint = constraint;
+      // Check if it is a cardinality constraint.
+      if (constraint.getTargetExpressions().size() == 1) {
+        TargetExpression targetExpr =
+            constraint.getTargetExpressions().iterator().next();
+        if (targetExpr.getTargetType() == TargetExpression.TargetType.SELF) {
+          transformedConstraint = new CardinalityConstraint(
+              constraint.getScope(), constraint.getMinCardinality(),
+              constraint.getMaxCardinality());
+        }
+      }
+      // Check if it is a target constraint.
+      if (constraint.getMinCardinality() == 1
+          && constraint.getMaxCardinality() == Integer.MAX_VALUE) {
+        transformedConstraint = new TargetConstraint(TargetOperator.IN,
+            constraint.getScope(), constraint.getTargetExpressions());
+      } else if (constraint.getMinCardinality() == 0
+          && constraint.getMaxCardinality() == 0) {
+        transformedConstraint = new TargetConstraint(TargetOperator.NOT_IN,
+            constraint.getScope(), constraint.getTargetExpressions());
+      }
+
+      return transformedConstraint;
+    }
+  }
+}
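
For reference, a minimal usage sketch of the two transformers added above (not part of the commit; the "hbase-m" tag mirrors the tests further down, and the class name TransformerSketch is illustrative): build a generic constraint, specialize it, then turn it back into an equivalent SingleConstraint.

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SingleConstraintTransformer;
import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SpecializedConstraintTransformer;

public class TransformerSketch {
  public static void main(String[] args) {
    // Generic form: place within NODE scope, targeting nodes tagged "hbase-m".
    PlacementConstraint generic =
        PlacementConstraints.build(targetIn(NODE, allocationTag("hbase-m")));

    // Specialize: the SingleConstraint becomes a TargetConstraint with op IN.
    PlacementConstraint specialized =
        new SpecializedConstraintTransformer(generic).transform();

    // And back: the TargetConstraint becomes a SingleConstraint with
    // cardinality [1, Integer.MAX_VALUE], matching the IN branch above.
    PlacementConstraint roundTripped =
        new SingleConstraintTransformer(specialized).transform();

    System.out.println(roundTripped.getConstraintExpr());
  }
}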

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0e16d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
new file mode 100644
index 0000000..660dc02
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * API related to resources.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.api.resource;
+import org.apache.hadoop.classification.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0e16d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPlacementConstraintPBConversion.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPlacementConstraintPBConversion.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPlacementConstraintPBConversion.java
new file mode 100644
index 0000000..bd245e2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPlacementConstraintPBConversion.java
@@ -0,0 +1,195 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api;
+
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.cardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.or;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+
+import java.util.Iterator;
+
+import org.apache.hadoop.yarn.api.pb.PlacementConstraintFromProtoConverter;
+import org.apache.hadoop.yarn.api.pb.PlacementConstraintToProtoConverter;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto.CompositeType;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test class for {@link PlacementConstraintToProtoConverter} and
+ * {@link PlacementConstraintFromProtoConverter}.
+ */
+public class TestPlacementConstraintPBConversion {
+
+  @Test
+  public void testTargetConstraintProtoConverter() {
+    AbstractConstraint sConstraintExpr =
+        targetIn(NODE, allocationTag("hbase-m"));
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+    PlacementConstraint sConstraint =
+        PlacementConstraints.build(sConstraintExpr);
+
+    // Convert to proto.
+    PlacementConstraintToProtoConverter toProtoConverter =
+        new PlacementConstraintToProtoConverter(sConstraint);
+    PlacementConstraintProto protoConstraint = toProtoConverter.convert();
+
+    Assert.assertTrue(protoConstraint.hasSimpleConstraint());
+    Assert.assertFalse(protoConstraint.hasCompositeConstraint());
+    SimplePlacementConstraintProto sProto =
+        protoConstraint.getSimpleConstraint();
+    Assert.assertEquals(single.getScope(), sProto.getScope());
+    Assert.assertEquals(single.getMinCardinality(), sProto.getMinCardinality());
+    Assert.assertEquals(single.getMaxCardinality(), sProto.getMaxCardinality());
+    Assert.assertEquals(single.getTargetExpressions().size(),
+        sProto.getTargetExpressionsList().size());
+
+    // Convert from proto.
+    PlacementConstraintFromProtoConverter fromProtoConverter =
+        new PlacementConstraintFromProtoConverter(protoConstraint);
+    PlacementConstraint newConstraint = fromProtoConverter.convert();
+
+    AbstractConstraint newConstraintExpr = newConstraint.getConstraintExpr();
+    Assert.assertTrue(newConstraintExpr instanceof SingleConstraint);
+    SingleConstraint newSingle = (SingleConstraint) newConstraintExpr;
+    Assert.assertEquals(single.getScope(), newSingle.getScope());
+    Assert.assertEquals(single.getMinCardinality(),
+        newSingle.getMinCardinality());
+    Assert.assertEquals(single.getMaxCardinality(),
+        newSingle.getMaxCardinality());
+    Assert.assertEquals(single.getTargetExpressions(),
+        newSingle.getTargetExpressions());
+  }
+
+  @Test
+  public void testCardinalityConstraintProtoConverter() {
+    AbstractConstraint sConstraintExpr = cardinality(RACK, 3, 10);
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+    PlacementConstraint sConstraint =
+        PlacementConstraints.build(sConstraintExpr);
+
+    // Convert to proto.
+    PlacementConstraintToProtoConverter toProtoConverter =
+        new PlacementConstraintToProtoConverter(sConstraint);
+    PlacementConstraintProto protoConstraint = toProtoConverter.convert();
+
+    compareSimpleConstraintToProto(single, protoConstraint);
+
+    // Convert from proto.
+    PlacementConstraintFromProtoConverter fromProtoConverter =
+        new PlacementConstraintFromProtoConverter(protoConstraint);
+    PlacementConstraint newConstraint = fromProtoConverter.convert();
+
+    AbstractConstraint newConstraintExpr = newConstraint.getConstraintExpr();
+    Assert.assertTrue(newConstraintExpr instanceof SingleConstraint);
+    SingleConstraint newSingle = (SingleConstraint) newConstraintExpr;
+    compareSimpleConstraints(single, newSingle);
+  }
+
+  @Test
+  public void testCompositeConstraintProtoConverter() {
+    AbstractConstraint constraintExpr =
+        or(targetIn(RACK, allocationTag("spark")), maxCardinality(NODE, 3),
+            targetCardinality(RACK, 2, 10, allocationTag("zk")));
+    Assert.assertTrue(constraintExpr instanceof Or);
+    PlacementConstraint constraint = PlacementConstraints.build(constraintExpr);
+    Or orExpr = (Or) constraintExpr;
+
+    // Convert to proto.
+    PlacementConstraintToProtoConverter toProtoConverter =
+        new PlacementConstraintToProtoConverter(constraint);
+    PlacementConstraintProto protoConstraint = toProtoConverter.convert();
+
+    Assert.assertFalse(protoConstraint.hasSimpleConstraint());
+    Assert.assertTrue(protoConstraint.hasCompositeConstraint());
+    CompositePlacementConstraintProto cProto =
+        protoConstraint.getCompositeConstraint();
+
+    Assert.assertEquals(CompositeType.OR, cProto.getCompositeType());
+    Assert.assertEquals(3, cProto.getChildConstraintsCount());
+    Assert.assertEquals(0, cProto.getTimedChildConstraintsCount());
+    Iterator<AbstractConstraint> orChildren = orExpr.getChildren().iterator();
+    Iterator<PlacementConstraintProto> orProtoChildren =
+        cProto.getChildConstraintsList().iterator();
+    while (orChildren.hasNext() && orProtoChildren.hasNext()) {
+      AbstractConstraint orChild = orChildren.next();
+      PlacementConstraintProto orProtoChild = orProtoChildren.next();
+      compareSimpleConstraintToProto((SingleConstraint) orChild, orProtoChild);
+    }
+
+    // Convert from proto.
+    PlacementConstraintFromProtoConverter fromProtoConverter =
+        new PlacementConstraintFromProtoConverter(protoConstraint);
+    PlacementConstraint newConstraint = fromProtoConverter.convert();
+
+    AbstractConstraint newConstraintExpr = newConstraint.getConstraintExpr();
+    Assert.assertTrue(newConstraintExpr instanceof Or);
+    Or newOrExpr = (Or) newConstraintExpr;
+    Assert.assertEquals(3, newOrExpr.getChildren().size());
+    orChildren = orExpr.getChildren().iterator();
+    Iterator<AbstractConstraint> newOrChildren =
+        newOrExpr.getChildren().iterator();
+    while (orChildren.hasNext() && newOrChildren.hasNext()) {
+      AbstractConstraint orChild = orChildren.next();
+      AbstractConstraint newOrChild = newOrChildren.next();
+      compareSimpleConstraints((SingleConstraint) orChild,
+          (SingleConstraint) newOrChild);
+    }
+  }
+
+  private void compareSimpleConstraintToProto(SingleConstraint constraint,
+      PlacementConstraintProto proto) {
+    Assert.assertTrue(proto.hasSimpleConstraint());
+    Assert.assertFalse(proto.hasCompositeConstraint());
+    SimplePlacementConstraintProto sProto = proto.getSimpleConstraint();
+    Assert.assertEquals(constraint.getScope(), sProto.getScope());
+    Assert.assertEquals(constraint.getMinCardinality(),
+        sProto.getMinCardinality());
+    Assert.assertEquals(constraint.getMaxCardinality(),
+        sProto.getMaxCardinality());
+    Assert.assertEquals(constraint.getTargetExpressions().size(),
+        sProto.getTargetExpressionsList().size());
+  }
+
+  private void compareSimpleConstraints(SingleConstraint single,
+      SingleConstraint newSingle) {
+    Assert.assertEquals(single.getScope(), newSingle.getScope());
+    Assert.assertEquals(single.getMinCardinality(),
+        newSingle.getMinCardinality());
+    Assert.assertEquals(single.getMaxCardinality(),
+        newSingle.getMaxCardinality());
+    Assert.assertEquals(single.getTargetExpressions(),
+        newSingle.getTargetExpressions());
+  }
+
+}
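
Condensed from the test above, a hedged round-trip sketch of the two converters (the class name ProtoRoundTripSketch is illustrative; all YARN types are the ones the test imports):

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.cardinality;

import org.apache.hadoop.yarn.api.pb.PlacementConstraintFromProtoConverter;
import org.apache.hadoop.yarn.api.pb.PlacementConstraintToProtoConverter;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;

public class ProtoRoundTripSketch {
  public static void main(String[] args) {
    // Build a cardinality constraint on the RACK scope, as in the test.
    PlacementConstraint original =
        PlacementConstraints.build(cardinality(RACK, 3, 10));

    // Serialize to the protobuf representation ...
    PlacementConstraintProto proto =
        new PlacementConstraintToProtoConverter(original).convert();

    // ... and rebuild an equivalent PlacementConstraint from it.
    PlacementConstraint restored =
        new PlacementConstraintFromProtoConverter(proto).convert();

    System.out.println(restored.getConstraintExpr());
  }
}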

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0e16d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java
new file mode 100644
index 0000000..1763735
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.cardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.or;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CardinalityConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint.TargetOperator;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SingleConstraintTransformer;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SpecializedConstraintTransformer;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test class for {@link PlacementConstraintTransformations}.
+ */
+public class TestPlacementConstraintTransformations {
+
+  @Test
+  public void testTargetConstraint() {
+    AbstractConstraint sConstraintExpr =
+        targetIn(NODE, allocationTag("hbase-m"));
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+    PlacementConstraint sConstraint =
+        PlacementConstraints.build(sConstraintExpr);
+
+    // Transform from SimpleConstraint to specialized TargetConstraint
+    SpecializedConstraintTransformer specTransformer =
+        new SpecializedConstraintTransformer(sConstraint);
+    PlacementConstraint tConstraint = specTransformer.transform();
+
+    AbstractConstraint tConstraintExpr = tConstraint.getConstraintExpr();
+    Assert.assertTrue(tConstraintExpr instanceof TargetConstraint);
+
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+    TargetConstraint target = (TargetConstraint) tConstraintExpr;
+    Assert.assertEquals(single.getScope(), target.getScope());
+    Assert.assertEquals(TargetOperator.IN, target.getOp());
+    Assert.assertEquals(single.getTargetExpressions(),
+        target.getTargetExpressions());
+
+    // Transform from specialized TargetConstraint to SimpleConstraint
+    SingleConstraintTransformer singleTransformer =
+        new SingleConstraintTransformer(tConstraint);
+    sConstraint = singleTransformer.transform();
+
+    sConstraintExpr = sConstraint.getConstraintExpr();
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+
+    single = (SingleConstraint) sConstraintExpr;
+    Assert.assertEquals(target.getScope(), single.getScope());
+    Assert.assertEquals(1, single.getMinCardinality());
+    Assert.assertEquals(Integer.MAX_VALUE, single.getMaxCardinality());
+    Assert.assertEquals(single.getTargetExpressions(),
+        target.getTargetExpressions());
+  }
+
+  @Test
+  public void testCardinalityConstraint() {
+    AbstractConstraint sConstraintExpr = cardinality(RACK, 3, 10);
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+    PlacementConstraint sConstraint =
+        PlacementConstraints.build(sConstraintExpr);
+
+    // Transform from SimpleConstraint to specialized CardinalityConstraint
+    SpecializedConstraintTransformer specTransformer =
+        new SpecializedConstraintTransformer(sConstraint);
+    PlacementConstraint cConstraint = specTransformer.transform();
+
+    AbstractConstraint cConstraintExpr = cConstraint.getConstraintExpr();
+    Assert.assertTrue(cConstraintExpr instanceof CardinalityConstraint);
+
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+    CardinalityConstraint cardinality = (CardinalityConstraint) cConstraintExpr;
+    Assert.assertEquals(single.getScope(), cardinality.getScope());
+    Assert.assertEquals(single.getMinCardinality(),
+        cardinality.getMinCardinality());
+    Assert.assertEquals(single.getMaxCardinality(),
+        cardinality.getMaxCardinality());
+
+    // Transform from specialized CardinalityConstraint to SimpleConstraint
+    SingleConstraintTransformer singleTransformer =
+        new SingleConstraintTransformer(cConstraint);
+    sConstraint = singleTransformer.transform();
+
+    sConstraintExpr = sConstraint.getConstraintExpr();
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+
+    single = (SingleConstraint) sConstraintExpr;
+    Assert.assertEquals(cardinality.getScope(), single.getScope());
+    Assert.assertEquals(cardinality.getMinCardinality(),
+        single.getMinCardinality());
+    Assert.assertEquals(cardinality.getMaxCardinality(),
+        single.getMaxCardinality());
+    Assert.assertEquals(new HashSet<>(Arrays.asList(PlacementTargets.self())),
+        single.getTargetExpressions());
+  }
+
+  @Test
+  public void testTargetCardinalityConstraint() {
+    AbstractConstraint constraintExpr =
+        targetCardinality(RACK, 3, 10, allocationTag("zk"));
+    Assert.assertTrue(constraintExpr instanceof SingleConstraint);
+    PlacementConstraint constraint = PlacementConstraints.build(constraintExpr);
+
+    // Apply transformation. Should be a no-op.
+    SpecializedConstraintTransformer specTransformer =
+        new SpecializedConstraintTransformer(constraint);
+    PlacementConstraint newConstraint = specTransformer.transform();
+
+    // The constraint expression should be the same.
+    Assert.assertEquals(constraintExpr, newConstraint.getConstraintExpr());
+  }
+
+  @Test
+  public void testCompositeConstraint() {
+    AbstractConstraint constraintExpr =
+        or(targetIn(RACK, allocationTag("spark")), maxCardinality(NODE, 3),
+            targetCardinality(RACK, 2, 10, allocationTag("zk")));
+    Assert.assertTrue(constraintExpr instanceof Or);
+    PlacementConstraint constraint = PlacementConstraints.build(constraintExpr);
+    Or orExpr = (Or) constraintExpr;
+    for (AbstractConstraint child : orExpr.getChildren()) {
+      Assert.assertTrue(child instanceof SingleConstraint);
+    }
+
+    // Apply transformation. Should transform target and cardinality constraints
+    // included in the composite constraint to specialized ones.
+    SpecializedConstraintTransformer specTransformer =
+        new SpecializedConstraintTransformer(constraint);
+    PlacementConstraint specConstraint = specTransformer.transform();
+
+    Or specOrExpr = (Or) specConstraint.getConstraintExpr();
+    List<AbstractConstraint> specChildren = specOrExpr.getChildren();
+    Assert.assertEquals(3, specChildren.size());
+    Assert.assertTrue(specChildren.get(0) instanceof TargetConstraint);
+    Assert.assertTrue(specChildren.get(1) instanceof CardinalityConstraint);
+    Assert.assertTrue(specChildren.get(2) instanceof SingleConstraint);
+
+    // Transform from specialized TargetConstraint to SimpleConstraint
+    SingleConstraintTransformer singleTransformer =
+        new SingleConstraintTransformer(specConstraint);
+    PlacementConstraint simConstraint = singleTransformer.transform();
+    Assert.assertTrue(simConstraint.getConstraintExpr() instanceof Or);
+    Or simOrExpr = (Or) simConstraint.getConstraintExpr();
+    for (AbstractConstraint child : simOrExpr.getChildren()) {
+      Assert.assertTrue(child instanceof SingleConstraint);
+    }
+  }
+
+}




[21/50] hadoop git commit: MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergery Novák.

Posted by as...@apache.org.
MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergery Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2018538f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2018538f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2018538f

Branch: refs/heads/YARN-6592
Commit: 2018538fdba1a95a6556187569e872fce7f9e1c3
Parents: 56ef527
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Sep 19 11:05:54 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Sep 19 11:05:54 2017 +0900

----------------------------------------------------------------------
 .../java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java | 7 ++++---
 .../main/java/org/apache/hadoop/examples/DBCountPageView.java | 7 ++++---
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java | 7 +++----
 .../src/main/java/org/apache/hadoop/examples/pi/DistSum.java  | 6 +++---
 .../java/org/apache/hadoop/examples/terasort/TeraGen.java     | 6 +++---
 .../org/apache/hadoop/examples/terasort/TeraOutputFormat.java | 7 ++++---
 .../org/apache/hadoop/examples/terasort/TeraScheduler.java    | 7 ++++---
 .../java/org/apache/hadoop/examples/terasort/TeraSort.java    | 6 +++---
 .../org/apache/hadoop/examples/terasort/TestTeraSort.java     | 6 +++---
 9 files changed, 31 insertions(+), 28 deletions(-)
----------------------------------------------------------------------
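
Each file below applies the same mechanical change. As a hedged summary (ExampleTool is an illustrative class, not one of the files in this commit), the commons-logging declaration is replaced by its slf4j counterpart:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative class only. The old form,
//   private static final Log LOG = LogFactory.getLog(ExampleTool.class);
// (org.apache.commons.logging), is replaced throughout by the slf4j form below.
public class ExampleTool {
  private static final Logger LOG =
      LoggerFactory.getLogger(ExampleTool.class);

  public static void main(String[] args) {
    // slf4j supports parameterized messages, avoiding string concatenation.
    LOG.info("Initialized logger for {}", ExampleTool.class.getSimpleName());
  }
}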


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
index 7e98d7d..da4ec79 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
@@ -29,8 +29,6 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -51,6 +49,8 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Charsets;
 
@@ -83,7 +83,8 @@ public class BaileyBorweinPlouffe extends Configured implements Tool {
   private static final String DIGIT_SIZE_PROPERTY = NAME + ".digit.size";
   private static final String DIGIT_PARTS_PROPERTY = NAME + ".digit.parts";
 
-  private static final Log LOG = LogFactory.getLog(BaileyBorweinPlouffe.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BaileyBorweinPlouffe.class);
 
   /** Mapper class computing digits of Pi. */
   public static class BbpMapper extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
index 8dec39d..7b73820 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
@@ -29,8 +29,6 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.LongWritable;
@@ -49,6 +47,8 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.hsqldb.server.Server;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This is a demonstrative program, which uses DBInputFormat for reading
@@ -77,7 +77,8 @@ import org.hsqldb.server.Server;
  */
 public class DBCountPageView extends Configured implements Tool {
 
-  private static final Log LOG = LogFactory.getLog(DBCountPageView.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DBCountPageView.class);
   
   private Connection connection;
   private boolean initialized = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
index 94d2c83..537b4d4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.examples.dancing;
 
 import java.util.*;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A generic solver for tile laying problems using Knuth's dancing link
@@ -35,8 +35,7 @@ import org.apache.commons.logging.LogFactory;
  * The type parameter ColumnName is the class of application's column names.
  */
 public class DancingLinks<ColumnName> {
-  private static final Log LOG = 
-    LogFactory.getLog(DancingLinks.class.getName());
+  private static final Logger LOG = LoggerFactory.getLogger(DancingLinks.class);
   
   /**
    * A cell in the table with up/down and left/right links that form doubly

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
index 99f7c24..ffe63fe 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
@@ -28,8 +28,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Callable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.examples.pi.math.Summation;
@@ -55,6 +53,8 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The main class for computing sums using map/reduce jobs.
@@ -66,7 +66,7 @@ import org.apache.hadoop.util.ToolRunner;
  * a mix-type job may be executed on either side.
  */
 public final class DistSum extends Configured implements Tool {
-  private static final Log LOG = LogFactory.getLog(DistSum.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DistSum.class);
 
   private static final String NAME = DistSum.class.getSimpleName();
   private static final String N_PARTS = "mapreduce.pi." + NAME + ".nParts";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
index 53bbdc4..ef20c4c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
@@ -25,8 +25,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.zip.Checksum;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
@@ -49,6 +47,8 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.PureJavaCrc32;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Generate the official GraySort input data set.
@@ -66,7 +66,7 @@ import org.apache.hadoop.util.ToolRunner;
  * <b>bin/hadoop jar hadoop-*-examples.jar teragen 10000000000 in-dir</b>
  */
 public class TeraGen extends Configured implements Tool {
-  private static final Log LOG = LogFactory.getLog(TeraGen.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TeraGen.class);
 
   public enum Counters {CHECKSUM}
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
index 96580b1..e0ce36c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.examples.terasort;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -38,12 +36,15 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.mapreduce.security.TokenCache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An output format that writes the key and value appended together.
  */
 public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
-  private static final Log LOG = LogFactory.getLog(TeraOutputFormat.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TeraOutputFormat.class);
   private OutputCommitter committer = null;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java
index 3e12a3d..7a9e44b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java
@@ -21,17 +21,18 @@ package org.apache.hadoop.examples.terasort;
 import java.io.*;
 import java.util.*;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
 import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Charsets;
 
 class TeraScheduler {
-  private static final Log LOG = LogFactory.getLog(TeraScheduler.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TeraScheduler.class);
   private Split[] splits;
   private List<Host> hosts = new ArrayList<Host>();
   private int slotsPerHost;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
index 040d13f..8b698e3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
@@ -23,8 +23,6 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.net.URI;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -38,6 +36,8 @@ import org.apache.hadoop.mapreduce.Partitioner;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Generates the sampled split points, launches the job, and waits for it to
@@ -47,7 +47,7 @@ import org.apache.hadoop.util.ToolRunner;
  * <b>bin/hadoop jar hadoop-*-examples.jar terasort in-dir out-dir</b>
  */
 public class TeraSort extends Configured implements Tool {
-  private static final Log LOG = LogFactory.getLog(TeraSort.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TeraSort.class);
 
   /**
    * A partitioner that splits text keys into roughly equal partitions

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
index b835a3b..b301659 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.examples.terasort;
 import java.io.File;
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.FileAlreadyExistsException;
@@ -29,12 +27,14 @@ import org.apache.hadoop.mapred.HadoopTestCase;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
 public class TestTeraSort extends HadoopTestCase {
-  private static Log LOG = LogFactory.getLog(TestTeraSort.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestTeraSort.class);
   
   public TestTeraSort()
       throws IOException {




[04/50] hadoop git commit: HDFS-10701. TestDFSStripedOutputStreamWithFailure#testBlockTokenExpired occasionally fails. Contributed by SammiChen.

Posted by as...@apache.org.
HDFS-10701. TestDFSStripedOutputStreamWithFailure#testBlockTokenExpired occasionally fails. Contributed by SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef8cd5dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef8cd5dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef8cd5dc

Branch: refs/heads/YARN-6592
Commit: ef8cd5dc565f901b4954befe784675e130e84c3c
Parents: 1a84c24
Author: Andrew Wang <wa...@apache.org>
Authored: Fri Sep 15 16:20:36 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Fri Sep 15 16:20:36 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java     | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef8cd5dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index ea889e3..57da439 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -260,8 +260,6 @@ public class TestDFSStripedOutputStreamWithFailure {
 
   @Test(timeout=240000)
   public void testBlockTokenExpired() throws Exception {
-    // TODO: this is very flaky, re-enable it later. See HDFS-12417.
-    assumeTrue("Test has been temporarily disabled. See HDFS-12417.", false);
     final int length = dataBlocks * (blockSize - cellSize);
     final HdfsConfiguration conf = newHdfsConfiguration();
 
@@ -494,8 +492,8 @@ public class TestDFSStripedOutputStreamWithFailure {
       final BlockManager bm = nn.getNamesystem().getBlockManager();
       final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
 
-      // set a short token lifetime (1 second)
-      SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
+      // set a short token lifetime (6 seconds)
+      SecurityTestUtil.setBlockTokenLifetime(sm, 6000L);
     }
 
     final AtomicInteger pos = new AtomicInteger();
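
For context, a minimal JUnit 4 sketch (not Hadoop code; class and method names are illustrative) of the assumption mechanism this commit removes: assumeTrue with a false condition reports the test as skipped rather than failed, which is how testBlockTokenExpired had been parked pending HDFS-12417.

import static org.junit.Assume.assumeTrue;

import org.junit.Test;

public class AssumeSketch {
  @Test
  public void skippedUnlessEnabled() {
    // When the condition is false, JUnit marks the test as skipped (ignored)
    // rather than failed; removing this line re-enables the test.
    assumeTrue("Temporarily disabled", false);
    // ... the test body never runs while the assumption above fails ...
  }
}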




[43/50] hadoop git commit: HDFS-12371. BlockVerificationFailures and BlocksVerified show up as 0 in Datanode JMX. Contributed by Hanisha Koneru.

Posted by as...@apache.org.
HDFS-12371. BlockVerificationFailures and BlocksVerified show up as 0 in Datanode JMX. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bf921a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bf921a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bf921a5

Branch: refs/heads/YARN-6592
Commit: 6bf921a5c3152a307b5c0903056d73ce07775a08
Parents: 10d7493
Author: Kihwal Lee <ki...@apache.org>
Authored: Thu Sep 21 08:42:50 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Thu Sep 21 08:42:50 2017 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java  | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf921a5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index 8b29fce..181ef80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Time;
@@ -81,6 +82,8 @@ public class VolumeScanner extends Thread {
    */
   private final DataNode datanode;
 
+  private final DataNodeMetrics metrics;
+
   /**
    * A reference to the volume that we're scanning.
    */
@@ -299,6 +302,7 @@ public class VolumeScanner extends Thread {
   VolumeScanner(Conf conf, DataNode datanode, FsVolumeReference ref) {
     this.conf = conf;
     this.datanode = datanode;
+    this.metrics = datanode.getMetrics();
     this.ref = ref;
     this.volume = ref.getVolume();
     ScanResultHandler handler;
@@ -443,12 +447,14 @@ public class VolumeScanner extends Thread {
       throttler.setBandwidth(bytesPerSec);
       long bytesRead = blockSender.sendBlock(nullStream, null, throttler);
       resultHandler.handle(block, null);
+      metrics.incrBlocksVerified();
       return bytesRead;
     } catch (IOException e) {
       resultHandler.handle(block, e);
     } finally {
       IOUtils.cleanup(null, blockSender);
     }
+    metrics.incrBlockVerificationFailures();
     return -1;
   }
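
A self-contained sketch (not Hadoop code; AtomicLong counters stand in for DataNodeMetrics) of the control flow in the hunk above: the success counter is incremented inside the try block and the method returns early, so the failure counter placed after the finally block runs only when the send threw and the catch path executed.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

public class ScanMetricsSketch {
  private final AtomicLong blocksVerified = new AtomicLong();
  private final AtomicLong verificationFailures = new AtomicLong();

  long scanBlock(boolean failRead) {
    try {
      long bytesRead = read(failRead);       // stands in for blockSender.sendBlock(...)
      blocksVerified.incrementAndGet();      // success counter, as in incrBlocksVerified()
      return bytesRead;                      // early return skips the failure counter
    } catch (IOException e) {
      // the real code reports the error via resultHandler.handle(block, e)
    } finally {
      // the real code releases the block sender here
    }
    verificationFailures.incrementAndGet();  // reached only on the error path
    return -1;
  }

  private long read(boolean fail) throws IOException {
    if (fail) {
      throw new IOException("simulated read failure");
    }
    return 42L;
  }

  public static void main(String[] args) {
    ScanMetricsSketch sketch = new ScanMetricsSketch();
    sketch.scanBlock(false);
    sketch.scanBlock(true);
    System.out.println(sketch.blocksVerified + " verified, "
        + sketch.verificationFailures + " failure(s)");
  }
}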
 




[20/50] hadoop git commit: YARN-7203. Add container ExecutionType into ContainerReport. (Botong Huang via asuresh)

Posted by as...@apache.org.
YARN-7203. Add container ExecutionType into ContainerReport. (Botong Huang via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56ef5279
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56ef5279
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56ef5279

Branch: refs/heads/YARN-6592
Commit: 56ef5279c1db93d03b2f1e04badbfe804f548918
Parents: 3cf3540
Author: Arun Suresh <as...@apache.org>
Authored: Mon Sep 18 15:49:31 2017 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Mon Sep 18 15:49:31 2017 -0700

----------------------------------------------------------------------
 .../yarn/api/records/ContainerReport.java       | 26 ++++++++++++++++++++
 .../src/main/proto/yarn_protos.proto            |  1 +
 .../yarn/client/api/impl/TestYarnClient.java    |  1 +
 .../records/impl/pb/ContainerReportPBImpl.java  | 20 +++++++++++++++
 .../rmcontainer/RMContainerImpl.java            |  2 +-
 5 files changed, 49 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
index 11d7bca..31d2812 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
@@ -52,6 +52,18 @@ public abstract class ContainerReport {
       long creationTime, long finishTime, String diagnosticInfo, String logUrl,
       int containerExitStatus, ContainerState containerState,
       String nodeHttpAddress) {
+    return newInstance(containerId, allocatedResource, assignedNode, priority,
+        creationTime, finishTime, diagnosticInfo, logUrl, containerExitStatus,
+        containerState, nodeHttpAddress, ExecutionType.GUARANTEED);
+  }
+
+  @Private
+  @Unstable
+  public static ContainerReport newInstance(ContainerId containerId,
+      Resource allocatedResource, NodeId assignedNode, Priority priority,
+      long creationTime, long finishTime, String diagnosticInfo, String logUrl,
+      int containerExitStatus, ContainerState containerState,
+      String nodeHttpAddress, ExecutionType executionType) {
     ContainerReport report = Records.newRecord(ContainerReport.class);
     report.setContainerId(containerId);
     report.setAllocatedResource(allocatedResource);
@@ -64,6 +76,7 @@ public abstract class ContainerReport {
     report.setContainerExitStatus(containerExitStatus);
     report.setContainerState(containerState);
     report.setNodeHttpAddress(nodeHttpAddress);
+    report.setExecutionType(executionType);
     return report;
   }
 
@@ -209,4 +222,17 @@ public abstract class ContainerReport {
   @Private
   @Unstable
   public abstract void setNodeHttpAddress(String nodeHttpAddress);
+
+  /**
+   * Get the execution type of the container.
+   *
+   * @return the execution type of the container
+   */
+  @Public
+  @Unstable
+  public abstract ExecutionType getExecutionType();
+
+  @Private
+  @Unstable
+  public abstract void setExecutionType(ExecutionType executionType);
 }
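
A hedged usage sketch (not part of the commit; the client setup and the container-id argument are illustrative): fetch a report through the existing YarnClient API and inspect the new field. Reports serialized before this change deserialize to the protobuf default, GUARANTEED.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.ExecutionType;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ExecutionTypeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();
    YarnClient client = YarnClient.createYarnClient();
    client.init(conf);
    client.start();
    try {
      // e.g. container_1505777788888_0001_01_000001
      ContainerId id = ContainerId.fromString(args[0]);
      ContainerReport report = client.getContainerReport(id);
      if (report.getExecutionType() == ExecutionType.OPPORTUNISTIC) {
        System.out.println(id + " is running opportunistically");
      } else {
        System.out.println(id + " is running as " + report.getExecutionType());
      }
    } finally {
      client.stop();
    }
  }
}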

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 066441c..fb340d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -137,6 +137,7 @@ message ContainerReportProto {
   optional int32 container_exit_status = 9;
   optional ContainerStateProto container_state = 10;
   optional string node_http_address = 11;
+  optional ExecutionTypeProto executionType = 12 [default = GUARANTEED];
 }
 
 enum YarnApplicationStateProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index cd0e472..4e5d8cd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -601,6 +601,7 @@ public class TestYarnClient {
     Assert.assertEquals(report.getContainerId().toString(),
         (ContainerId.newContainerId(expectedReports.get(0)
             .getCurrentApplicationAttemptId(), 3)).toString());
+    Assert.assertNotNull(report.getExecutionType());
     client.stop();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java
index 5d435da..2b58c70 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.api.records.impl.pb;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -355,4 +356,23 @@ public class ContainerReportPBImpl extends ContainerReport {
     }
     builder.setNodeHttpAddress(nodeHttpAddress);
   }
+
+  @Override
+  public ExecutionType getExecutionType() {
+    ContainerReportProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasExecutionType()) {
+      return ExecutionType.GUARANTEED;  // default value
+    }
+    return ProtoUtils.convertFromProtoFormat(p.getExecutionType());
+  }
+
+  @Override
+  public void setExecutionType(ExecutionType executionType) {
+    maybeInitBuilder();
+    if (executionType == null) {
+      builder.clearExecutionType();
+      return;
+    }
+    builder.setExecutionType(ProtoUtils.convertToProtoFormat(executionType));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 8c165de..a43459c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -756,7 +756,7 @@ public class RMContainerImpl implements RMContainer {
           this.getAllocatedSchedulerKey().getPriority(), this.getCreationTime(),
           this.getFinishTime(), this.getDiagnosticsInfo(), this.getLogURL(),
           this.getContainerExitStatus(), this.getContainerState(),
-          this.getNodeHttpAddress());
+          this.getNodeHttpAddress(), this.getExecutionType());
     } finally {
       this.readLock.unlock();
     }
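
For downstream consumers, a minimal sketch of how the new field surfaces through the client API (assuming a started YarnClient and a known ApplicationAttemptId; the class and method names below are illustrative, not part of the patch):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ContainerReport;
    import org.apache.hadoop.yarn.client.api.YarnClient;
    import org.apache.hadoop.yarn.exceptions.YarnException;

    public final class ExecutionTypeLister {
      /** Prints the execution type reported for each container of an attempt. */
      public static void printExecutionTypes(YarnClient client,
          ApplicationAttemptId attemptId) throws YarnException, IOException {
        List<ContainerReport> reports = client.getContainers(attemptId);
        for (ContainerReport report : reports) {
          // Reports produced before this change deserialize to the proto
          // default, so getExecutionType() still returns GUARANTEED for them.
          System.out.println(report.getContainerId() + " -> "
              + report.getExecutionType());
        }
      }
    }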




[41/50] hadoop git commit: HDFS-12526. FSDirectory should use Time.monotonicNow for durations. Contributed by Bharat Viswanadham.

Posted by as...@apache.org.
HDFS-12526. FSDirectory should use Time.monotonicNow for durations. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50849ec9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50849ec9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50849ec9

Branch: refs/heads/YARN-6592
Commit: 50849ec9eb9bad90586a95d2b2380ee6e8724d6b
Parents: 8b33663
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Sep 21 19:27:48 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Sep 21 19:27:48 2017 +0900

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50849ec9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 6604b5a..3c55112 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -752,7 +752,7 @@ public class FSDirectory implements Closeable {
     try {
       int threads = (initThreads < 1) ? 1 : initThreads;
       LOG.info("Initializing quota with " + threads + " thread(s)");
-      long start = Time.now();
+      long start = Time.monotonicNow();
       QuotaCounts counts = new QuotaCounts.Builder().build();
       ForkJoinPool p = new ForkJoinPool(threads);
       RecursiveAction task = new InitQuotaTask(getBlockStoragePolicySuite(),
@@ -760,7 +760,7 @@ public class FSDirectory implements Closeable {
       p.execute(task);
       task.join();
       p.shutdown();
-      LOG.info("Quota initialization completed in " + (Time.now() - start) +
+      LOG.info("Quota initialization completed in " + (Time.monotonicNow() - start) +
           " milliseconds\n" + counts);
     } finally {
       writeUnlock();
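
The same pattern applies to any elapsed-time measurement in the codebase; a minimal, self-contained sketch (the class name and the sleep are placeholders for the real timed work):

    import org.apache.hadoop.util.Time;

    public final class DurationExample {
      public static void main(String[] args) throws InterruptedException {
        // Time.now() tracks the wall clock, which can jump backwards or forwards
        // (NTP corrections, manual clock changes). Time.monotonicNow() only moves
        // forward, so it is the right clock for computing durations.
        long start = Time.monotonicNow();
        Thread.sleep(250);                      // stands in for the timed work
        long elapsedMs = Time.monotonicNow() - start;
        System.out.println("work took " + elapsedMs + " ms");
      }
    }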




[11/50] hadoop git commit: YARN-7192. Add a pluggable StateMachine Listener that is notified of NM Container State changes. Contributed by Arun Suresh

Posted by as...@apache.org.
YARN-7192. Add a pluggable StateMachine Listener that is notified of NM Container State changes. Contributed by Arun Suresh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4f9c7c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4f9c7c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4f9c7c9

Branch: refs/heads/YARN-6592
Commit: a4f9c7c9247801dd37beec6fc195622af1b884ad
Parents: 0f9af24
Author: Jason Lowe <jl...@apache.org>
Authored: Mon Sep 18 10:16:09 2017 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Mon Sep 18 10:16:09 2017 -0500

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |  6 +-
 .../state/MultiStateTransitionListener.java     | 61 ++++++++++++++++++
 .../hadoop/yarn/state/StateMachineFactory.java  | 40 ++++++++++++
 .../yarn/state/StateTransitionListener.java     | 50 ++++++++++++++
 .../src/main/resources/yarn-default.xml         |  6 ++
 .../ContainerStateTransitionListener.java       | 48 ++++++++++++++
 .../hadoop/yarn/server/nodemanager/Context.java |  2 +
 .../yarn/server/nodemanager/NodeManager.java    | 48 +++++++++++++-
 .../container/ContainerImpl.java                |  3 +-
 .../server/nodemanager/TestNodeManager.java     | 68 ++++++++++++++++++++
 .../amrmproxy/BaseAMRMProxyTest.java            |  8 +++
 .../container/TestContainer.java                | 53 +++++++++++++++
 12 files changed, 389 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 48910b3..114453f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -968,9 +968,13 @@ public class YarnConfiguration extends Configuration {
     NM_PREFIX + "bind-host";
 
   /** who will execute(launch) the containers.*/
-  public static final String NM_CONTAINER_EXECUTOR = 
+  public static final String NM_CONTAINER_EXECUTOR =
     NM_PREFIX + "container-executor.class";
 
+  /** List of container state transition listeners.*/
+  public static final String NM_CONTAINER_STATE_TRANSITION_LISTENERS =
+      NM_PREFIX + "container-state-transition-listener.classes";
+
   /**  
    * Adjustment to make to the container os scheduling priority.
    * The valid values for this could vary depending on the platform.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultiStateTransitionListener.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultiStateTransitionListener.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultiStateTransitionListener.java
new file mode 100644
index 0000000..1a28fc5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultiStateTransitionListener.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.state;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A {@link StateTransitionListener} that dispatches the pre and post
+ * state transitions to multiple registered listeners.
+ * NOTE: The registered listeners are called in a for loop. Clients should
+ *       know that a listener configured earlier might prevent a later listener
+ *       from being called, if for instance it throws an un-caught Exception.
+ */
+public abstract class MultiStateTransitionListener
+    <OPERAND, EVENT, STATE extends Enum<STATE>> implements
+    StateTransitionListener<OPERAND, EVENT, STATE> {
+
+  private final List<StateTransitionListener<OPERAND, EVENT, STATE>> listeners =
+      new ArrayList<>();
+
+  /**
+   * Add a listener to the list of listeners.
+   * @param listener A listener.
+   */
+  public void addListener(StateTransitionListener<OPERAND, EVENT, STATE>
+      listener) {
+    listeners.add(listener);
+  }
+
+  @Override
+  public void preTransition(OPERAND op, STATE beforeState,
+      EVENT eventToBeProcessed) {
+    for (StateTransitionListener<OPERAND, EVENT, STATE> listener : listeners) {
+      listener.preTransition(op, beforeState, eventToBeProcessed);
+    }
+  }
+
+  @Override
+  public void postTransition(OPERAND op, STATE beforeState, STATE afterState,
+      EVENT processedEvent) {
+    for (StateTransitionListener<OPERAND, EVENT, STATE> listener : listeners) {
+      listener.postTransition(op, beforeState, afterState, processedEvent);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachineFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachineFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachineFactory.java
index 5b76ce8..4bb005c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachineFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachineFactory.java
@@ -391,6 +391,21 @@ final public class StateMachineFactory
     }
   }
 
+  /**
+   * A StateMachine that accepts a transition listener.
+   * @param operand the object upon which the returned
+   *                {@link StateMachine} will operate.
+   * @param initialState the state in which the returned
+   *                {@link StateMachine} will start.
+   * @param listener An implementation of a {@link StateTransitionListener}.
+   * @return A {@link StateMachine}.
+   */
+  public StateMachine<STATE, EVENTTYPE, EVENT>
+        make(OPERAND operand, STATE initialState,
+             StateTransitionListener<OPERAND, EVENT, STATE> listener) {
+    return new InternalStateMachine(operand, initialState, listener);
+  }
+
   /* 
    * @return a {@link StateMachine} that starts in 
    *         {@code initialState} and whose {@link Transition} s are
@@ -424,14 +439,36 @@ final public class StateMachineFactory
     return new InternalStateMachine(operand, defaultInitialState);
   }
 
+  private static class NoopStateTransitionListener
+      implements StateTransitionListener {
+    @Override
+    public void preTransition(Object op, Enum beforeState,
+        Object eventToBeProcessed) { }
+
+    @Override
+    public void postTransition(Object op, Enum beforeState, Enum afterState,
+        Object processedEvent) { }
+  }
+
+  private static final NoopStateTransitionListener NOOP_LISTENER =
+      new NoopStateTransitionListener();
+
   private class InternalStateMachine
         implements StateMachine<STATE, EVENTTYPE, EVENT> {
     private final OPERAND operand;
     private STATE currentState;
+    private final StateTransitionListener<OPERAND, EVENT, STATE> listener;
 
     InternalStateMachine(OPERAND operand, STATE initialState) {
+      this(operand, initialState, null);
+    }
+
+    InternalStateMachine(OPERAND operand, STATE initialState,
+        StateTransitionListener<OPERAND, EVENT, STATE> transitionListener) {
       this.operand = operand;
       this.currentState = initialState;
+      this.listener =
+          (transitionListener == null) ? NOOP_LISTENER : transitionListener;
       if (!optimized) {
         maybeMakeStateMachineTable();
       }
@@ -445,8 +482,11 @@ final public class StateMachineFactory
     @Override
     public synchronized STATE doTransition(EVENTTYPE eventType, EVENT event)
          throws InvalidStateTransitionException  {
+      listener.preTransition(operand, currentState, event);
+      STATE oldState = currentState;
       currentState = StateMachineFactory.this.doTransition
           (operand, currentState, eventType, event);
+      listener.postTransition(operand, oldState, currentState, event);
       return currentState;
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateTransitionListener.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateTransitionListener.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateTransitionListener.java
new file mode 100644
index 0000000..657c193
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateTransitionListener.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.state;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A State Transition Listener.
+ * It exposes a pre and post transition hook called before and
+ * after the transition.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface StateTransitionListener
+    <OPERAND, EVENT, STATE extends Enum<STATE>> {
+
+  /**
+   * Pre Transition Hook. This will be called before transition.
+   * @param op Operand.
+   * @param beforeState State before transition.
+   * @param eventToBeProcessed Incoming Event.
+   */
+  void preTransition(OPERAND op, STATE beforeState, EVENT eventToBeProcessed);
+
+  /**
+   * Post Transition Hook. This will be called after the transition.
+   * @param op Operand.
+   * @param beforeState State before transition.
+   * @param afterState State after transition.
+   * @param processedEvent Processed Event.
+   */
+  void postTransition(OPERAND op, STATE beforeState, STATE afterState,
+      EVENT processedEvent);
+}
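
As a rough illustration of the new hook, a listener that only logs transitions could look like the sketch below (class name illustrative); per the factory change above it would be supplied through StateMachineFactory#make(operand, initialState, listener):

    import org.apache.hadoop.yarn.state.StateTransitionListener;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    /** Logs every pre- and post-transition callback it receives. */
    public class LoggingTransitionListener<OPERAND, EVENT, STATE extends Enum<STATE>>
        implements StateTransitionListener<OPERAND, EVENT, STATE> {

      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingTransitionListener.class);

      @Override
      public void preTransition(OPERAND op, STATE beforeState,
          EVENT eventToBeProcessed) {
        LOG.debug("{}: about to leave {} on {}", op, beforeState,
            eventToBeProcessed);
      }

      @Override
      public void postTransition(OPERAND op, STATE beforeState, STATE afterState,
          EVENT processedEvent) {
        LOG.debug("{}: {} -> {} on {}", op, beforeState, afterState,
            processedEvent);
      }
    }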

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 6444da9..0440458 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1004,6 +1004,12 @@
   </property>
 
   <property>
+    <description>Comma-separated list of container state transition listeners.</description>
+    <name>yarn.nodemanager.container-state-transition-listener.classes</name>
+    <value></value>
+  </property>
+
+  <property>
     <description>Number of threads container manager uses.</description>
     <name>yarn.nodemanager.container-manager.thread-count</name>
     <value>20</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerStateTransitionListener.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerStateTransitionListener.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerStateTransitionListener.java
new file mode 100644
index 0000000..24cdb1f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerStateTransitionListener.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.state.StateTransitionListener;
+
+/**
+ * Interface to be used by external cluster operators to implement a
+ * State Transition listener that is notified before and after a container
+ * state transition.
+ * NOTE: The pre and post transition callbacks are made in the same
+ *       synchronized block as the call to the instrumented transition,
+ *       serially, in the order: preTransition, transition, postTransition.
+ *       The implementor
+ *       must ensure that the callbacks return in a timely manner to avoid
+ *       blocking the state-machine.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface ContainerStateTransitionListener extends
+    StateTransitionListener<ContainerImpl, ContainerEvent, ContainerState> {
+
+  /**
+   * Init method which will be invoked by the Node Manager to inject the
+   * NM {@link Context}.
+   * @param context NM Context.
+   */
+  void init(Context context);
+}
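
For cluster operators, a sketch of a plug-in built on this interface (the class name is illustrative). It would be registered by listing its fully qualified name under yarn.nodemanager.container-state-transition-listener.classes in yarn-site.xml; createNMContext() instantiates the configured classes reflectively, so a no-argument constructor is assumed, and the callbacks run inside the container state machine's synchronized block, so they must return quickly:

    import org.apache.hadoop.yarn.server.nodemanager.ContainerStateTransitionListener;
    import org.apache.hadoop.yarn.server.nodemanager.Context;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    /** Emits an audit line for every NM container state change. */
    public class AuditingContainerStateListener
        implements ContainerStateTransitionListener {

      private static final Logger LOG =
          LoggerFactory.getLogger(AuditingContainerStateListener.class);

      @Override
      public void init(Context context) {
        // Keep the NM Context here if node-level details are needed later.
      }

      @Override
      public void preTransition(ContainerImpl container,
          ContainerState beforeState, ContainerEvent eventToBeProcessed) {
        // Intentionally cheap: this runs before the transition is applied.
      }

      @Override
      public void postTransition(ContainerImpl container,
          ContainerState beforeState, ContainerState afterState,
          ContainerEvent processedEvent) {
        LOG.info("{}: {} -> {} on {}", container.getContainerId(), beforeState,
            afterState, processedEvent.getType());
      }
    }

Multiple listener classes can be listed in the property; the NodeManager wraps them in a DefaultContainerStateListener, which dispatches to each registered listener in configuration order.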

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
index 00bd0ef..a2d00a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
@@ -120,4 +120,6 @@ public interface Context {
   NMTimelinePublisher getNMTimelinePublisher();
 
   ContainerExecutor getContainerExecutor();
+
+  ContainerStateTransitionListener getContainerStateTransitionListener();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 3e919c5..a97b3f2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -20,12 +20,18 @@ package org.apache.hadoop.yarn.server.nodemanager;
 
 import java.io.IOException;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.state.MultiStateTransitionListener;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -130,6 +136,17 @@ public class NodeManager extends CompositeService
   private boolean rmWorkPreservingRestartEnabled;
   private boolean shouldExitOnShutdownEvent = false;
 
+  /**
+   * Default Container State transition listener.
+   */
+  public static class DefaultContainerStateListener extends
+      MultiStateTransitionListener
+          <ContainerImpl, ContainerEvent, ContainerState>
+      implements ContainerStateTransitionListener {
+    @Override
+    public void init(Context context) {}
+  }
+
   public NodeManager() {
     super(NodeManager.class.getName());
   }
@@ -219,8 +236,22 @@ public class NodeManager extends CompositeService
       NMTokenSecretManagerInNM nmTokenSecretManager,
       NMStateStoreService stateStore, boolean isDistSchedulerEnabled,
       Configuration conf) {
-    return new NMContext(containerTokenSecretManager, nmTokenSecretManager,
-        dirsHandler, aclsManager, stateStore, isDistSchedulerEnabled, conf);
+    List<ContainerStateTransitionListener> listeners =
+        conf.getInstances(
+            YarnConfiguration.NM_CONTAINER_STATE_TRANSITION_LISTENERS,
+        ContainerStateTransitionListener.class);
+    NMContext nmContext = new NMContext(containerTokenSecretManager,
+        nmTokenSecretManager, dirsHandler, aclsManager, stateStore,
+        isDistSchedulerEnabled, conf);
+    DefaultContainerStateListener defaultListener =
+        new DefaultContainerStateListener();
+    nmContext.setContainerStateTransitionListener(defaultListener);
+    defaultListener.init(nmContext);
+    for (ContainerStateTransitionListener listener : listeners) {
+      listener.init(nmContext);
+      defaultListener.addListener(listener);
+    }
+    return nmContext;
   }
 
   protected void doSecureLogin() throws IOException {
@@ -563,6 +594,8 @@ public class NodeManager extends CompositeService
 
     private NMTimelinePublisher nmTimelinePublisher;
 
+    private ContainerStateTransitionListener containerStateTransitionListener;
+
     public NMContext(NMContainerTokenSecretManager containerTokenSecretManager,
         NMTokenSecretManagerInNM nmTokenSecretManager,
         LocalDirsHandlerService dirsHandler, ApplicationACLsManager aclsManager,
@@ -752,6 +785,17 @@ public class NodeManager extends CompositeService
     public void setContainerExecutor(ContainerExecutor executor) {
       this.executor = executor;
     }
+
+    @Override
+    public ContainerStateTransitionListener
+        getContainerStateTransitionListener() {
+      return this.containerStateTransitionListener;
+    }
+
+    public void setContainerStateTransitionListener(
+        ContainerStateTransitionListener transitionListener) {
+      this.containerStateTransitionListener = transitionListener;
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 9b9c47f..df107a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -240,7 +240,8 @@ public class ContainerImpl implements Container {
     this.containerRetryContext = configureRetryContext(
         conf, launchContext, this.containerId);
     this.remainingRetryAttempts = this.containerRetryContext.getMaxRetries();
-    stateMachine = stateMachineFactory.make(this);
+    stateMachine = stateMachineFactory.make(this, ContainerState.NEW,
+        context.getContainerStateTransitionListener());
     this.context = context;
     this.resourceSet = new ResourceSet();
     this.resourceMappings = new ResourceMappings();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManager.java
index 2d390ac..9279711 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManager.java
@@ -25,6 +25,9 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
 import org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeLabelsProvider;
 import org.junit.Assert;
 import org.junit.Test;
@@ -57,6 +60,71 @@ public class TestNodeManager {
     }
   }
 
+  private static int initCalls = 0;
+  private static int preCalls = 0;
+  private static int postCalls = 0;
+
+  private static class DummyCSTListener1
+      implements ContainerStateTransitionListener {
+    @Override
+    public void init(Context context) {
+      initCalls++;
+    }
+
+    @Override
+    public void preTransition(ContainerImpl op, ContainerState beforeState,
+        ContainerEvent eventToBeProcessed) {
+      preCalls++;
+    }
+
+    @Override
+    public void postTransition(ContainerImpl op, ContainerState beforeState,
+        ContainerState afterState, ContainerEvent processedEvent) {
+      postCalls++;
+    }
+  }
+
+  private static class DummyCSTListener2
+      implements ContainerStateTransitionListener {
+    @Override
+    public void init(Context context) {
+      initCalls++;
+    }
+
+    @Override
+    public void preTransition(ContainerImpl op, ContainerState beforeState,
+        ContainerEvent eventToBeProcessed) {
+      preCalls++;
+    }
+
+    @Override
+    public void postTransition(ContainerImpl op, ContainerState beforeState,
+        ContainerState afterState, ContainerEvent processedEvent) {
+      postCalls++;
+    }
+  }
+
+  @Test
+  public void testListenerInitialization() throws Exception{
+    NodeManager nodeManager = new NodeManager();
+    Configuration conf = new Configuration();
+    conf.set(YarnConfiguration.NM_CONTAINER_STATE_TRANSITION_LISTENERS,
+        DummyCSTListener1.class.getName() + ","
+            + DummyCSTListener2.class.getName());
+    initCalls = 0;
+    preCalls = 0;
+    postCalls = 0;
+    NodeManager.NMContext nmContext =
+        nodeManager.createNMContext(null, null, null, false, conf);
+    Assert.assertEquals(2, initCalls);
+    nmContext.getContainerStateTransitionListener().preTransition(
+        null, null, null);
+    nmContext.getContainerStateTransitionListener().postTransition(
+        null, null, null, null);
+    Assert.assertEquals(2, preCalls);
+    Assert.assertEquals(2, postCalls);
+  }
+
   @Test
   public void testCreationOfNodeLabelsProviderService()
       throws InterruptedException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
index 7c8551e..0838f1e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
@@ -33,6 +33,8 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.yarn.server.nodemanager.ContainerStateTransitionListener;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -765,5 +767,11 @@ public abstract class BaseAMRMProxyTest {
     public ContainerExecutor getContainerExecutor() {
       return null;
     }
+
+    @Override
+    public ContainerStateTransitionListener
+        getContainerStateTransitionListener() {
+      return null;
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
index 8909088..64e6cf0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
@@ -71,7 +71,9 @@ import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerStateTransitionListener;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
@@ -287,6 +289,29 @@ public class TestContainer {
       assertEquals(ContainerState.DONE, wc.c.getContainerState());
       assertEquals(completed + 1, metrics.getCompletedContainers());
       assertEquals(running, metrics.getRunningContainers());
+
+      ContainerEventType e1 = wc.initStateToEvent.get(ContainerState.NEW);
+      ContainerState s2 = wc.eventToFinalState.get(e1);
+      ContainerEventType e2 = wc.initStateToEvent.get(s2);
+      ContainerState s3 = wc.eventToFinalState.get(e2);
+      ContainerEventType e3 = wc.initStateToEvent.get(s3);
+      ContainerState s4 = wc.eventToFinalState.get(e3);
+      ContainerEventType e4 = wc.initStateToEvent.get(s4);
+      ContainerState s5 = wc.eventToFinalState.get(e4);
+      ContainerEventType e5 = wc.initStateToEvent.get(s5);
+      ContainerState s6 = wc.eventToFinalState.get(e5);
+
+      Assert.assertEquals(ContainerState.LOCALIZING, s2);
+      Assert.assertEquals(ContainerState.SCHEDULED, s3);
+      Assert.assertEquals(ContainerState.RUNNING, s4);
+      Assert.assertEquals(ContainerState.EXITED_WITH_SUCCESS, s5);
+      Assert.assertEquals(ContainerState.DONE, s6);
+
+      Assert.assertEquals(ContainerEventType.INIT_CONTAINER, e1);
+      Assert.assertEquals(ContainerEventType.RESOURCE_LOCALIZED, e2);
+      Assert.assertEquals(ContainerEventType.CONTAINER_LAUNCHED, e3);
+      Assert.assertEquals(ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS, e4);
+      Assert.assertEquals(ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP, e5);
     }
     finally {
       if (wc != null) {
@@ -401,6 +426,10 @@ public class TestContainer {
       Assert.assertTrue(
           containerMetrics.finishTime.value() > containerMetrics.startTime
               .value());
+      Assert.assertEquals(ContainerEventType.KILL_CONTAINER,
+          wc.initStateToEvent.get(ContainerState.NEW));
+      Assert.assertEquals(ContainerState.DONE,
+          wc.eventToFinalState.get(ContainerEventType.KILL_CONTAINER));
     } finally {
       if (wc != null) {
         wc.finished();
@@ -942,6 +971,10 @@ public class TestContainer {
     final Map<String, LocalResource> localResources;
     final Map<String, ByteBuffer> serviceData;
     final Context context = mock(Context.class);
+    private final Map<ContainerState, ContainerEventType> initStateToEvent =
+        new HashMap<>();
+    private final Map<ContainerEventType, ContainerState> eventToFinalState =
+        new HashMap<>();
 
     WrappedContainer(int appId, long timestamp, int id, String user)
         throws IOException {
@@ -1048,7 +1081,27 @@ public class TestContainer {
       }
       when(ctxt.getServiceData()).thenReturn(serviceData);
       when(ctxt.getContainerRetryContext()).thenReturn(containerRetryContext);
+      ContainerStateTransitionListener listener =
+          new ContainerStateTransitionListener() {
+        @Override
+        public void init(Context cntxt) {}
+
+        @Override
+        public void preTransition(ContainerImpl op, ContainerState beforeState,
+            ContainerEvent eventToBeProcessed) {
+          initStateToEvent.put(beforeState, eventToBeProcessed.getType());
+        }
 
+        @Override
+        public void postTransition(ContainerImpl op, ContainerState beforeState,
+            ContainerState afterState, ContainerEvent processedEvent) {
+          eventToFinalState.put(processedEvent.getType(), afterState);
+        }
+      };
+      NodeManager.DefaultContainerStateListener multi =
+          new NodeManager.DefaultContainerStateListener();
+      multi.addListener(listener);
+      when(context.getContainerStateTransitionListener()).thenReturn(multi);
       c = new ContainerImpl(conf, dispatcher, ctxt, null, metrics, identifier,
           context);
       dispatcher.register(ContainerEventType.class,




[13/50] hadoop git commit: Revert "YARN-7162. Remove XML excludes file format (rkanter)" - wrong commit message

Posted by as...@apache.org.
Revert "YARN-7162. Remove XML excludes file format (rkanter)" - wrong commit message

This reverts commit 3a8d57a0a2e047b34be82f602a2b6cf5593d2125.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f496683
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f496683
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f496683

Branch: refs/heads/YARN-6592
Commit: 5f496683fb00ba26a6bf5a506ae87d4bc4088727
Parents: a2dcba1
Author: Robert Kanter <rk...@apache.org>
Authored: Mon Sep 18 10:32:08 2017 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Mon Sep 18 10:32:08 2017 -0700

----------------------------------------------------------------------
 .../hadoop-mapreduce-client-core/pom.xml        |  4 --
 .../hadoop/mapreduce/JobResourceUploader.java   | 17 --------
 .../apache/hadoop/mapreduce/MRJobConfig.java    |  5 ---
 .../src/main/resources/mapred-default.xml       |  9 ----
 .../mapreduce/TestJobResourceUploader.java      | 46 --------------------
 5 files changed, 81 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f496683/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index ce5fdc8..c34f7bd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -44,10 +44,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <scope>test</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f496683/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index d9bf988..f1cad57 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -36,8 +36,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.mapreduce.filecache.ClientDistributedCacheManager;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 
@@ -96,11 +94,6 @@ class JobResourceUploader {
         new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
     mkdirs(jtFs, submitJobDir, mapredSysPerms);
 
-    if (!conf.getBoolean(MRJobConfig.MR_AM_STAGING_DIR_ERASURECODING_ENABLED,
-        MRJobConfig.DEFAULT_MR_AM_STAGING_ERASURECODING_ENABLED)) {
-      disableErasureCodingForPath(jtFs, submitJobDir);
-    }
-
     Collection<String> files = conf.getStringCollection("tmpfiles");
     Collection<String> libjars = conf.getStringCollection("tmpjars");
     Collection<String> archives = conf.getStringCollection("tmparchives");
@@ -582,14 +575,4 @@ class JobResourceUploader {
     }
     return finalPath;
   }
-
-  private void disableErasureCodingForPath(FileSystem fs, Path path)
-      throws IOException {
-    if (jtFs instanceof DistributedFileSystem) {
-      LOG.info("Disabling Erasure Coding for path: " + path);
-      DistributedFileSystem dfs = (DistributedFileSystem) jtFs;
-      dfs.setErasureCodingPolicy(path,
-          SystemErasureCodingPolicies.getReplicationPolicy().getName());
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f496683/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 86abb42..2023ba3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -1037,9 +1037,4 @@ public interface MRJobConfig {
   String FINISH_JOB_WHEN_REDUCERS_DONE =
       "mapreduce.job.finish-when-all-reducers-done";
   boolean DEFAULT_FINISH_JOB_WHEN_REDUCERS_DONE = true;
-
-  String MR_AM_STAGING_DIR_ERASURECODING_ENABLED =
-      MR_AM_STAGING_DIR + "erasurecoding.enabled";
-
-  boolean DEFAULT_MR_AM_STAGING_ERASURECODING_ENABLED = false;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f496683/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 6b6faf2..ee9b906 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1261,15 +1261,6 @@
 </property>
 
 <property>
-  <name>yarn.app.mapreduce.am.staging-dir.erasurecoding.enabled</name>
-  <value>false</value>
-  <description>Whether Erasure Coding should be enabled for
-  files that are copied to the MR staging area. This is a job-level
-  setting.
-  </description>
-</property>
-
-<property>
   <name>mapreduce.am.max-attempts</name>
   <value>2</value>
   <description>The maximum number of application attempts. It is a

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f496683/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
index d0d7a34..20b7b7d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
@@ -20,11 +20,6 @@ package org.apache.hadoop.mapreduce;
 
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.times;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
 
 import java.io.IOException;
 import java.net.URI;
@@ -41,12 +36,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.mapred.JobConf;
 import org.junit.Assert;
 import org.junit.Test;
-import org.mockito.verification.VerificationMode;
 
 /**
  * A class for unit testing JobResourceUploader.
@@ -365,40 +357,6 @@ public class TestJobResourceUploader {
         expectedArchivesWithFrags, expectedJobJar);
   }
 
-  @Test
-  public void testErasureCodingDefault() throws IOException {
-    testErasureCodingSetting(true);
-  }
-
-  @Test
-  public void testErasureCodingDisabled() throws IOException {
-    testErasureCodingSetting(false);
-  }
-
-  private void testErasureCodingSetting(boolean defaultBehavior)
-      throws IOException {
-    JobConf jConf = new JobConf();
-    // don't set to false if EC remains disabled to check default setting
-    if (!defaultBehavior) {
-      jConf.setBoolean(MRJobConfig.MR_AM_STAGING_DIR_ERASURECODING_ENABLED,
-          true);
-    }
-
-    DistributedFileSystem fs = mock(DistributedFileSystem.class);
-    Path path = new Path("/");
-    when(fs.makeQualified(any(Path.class))).thenReturn(path);
-    JobResourceUploader uploader = new StubedUploader(fs, true);
-    Job job = Job.getInstance(jConf);
-
-    uploader.uploadResources(job, new Path("/test"));
-
-    String replicationPolicyName = SystemErasureCodingPolicies
-        .getReplicationPolicy().getName();
-    VerificationMode mode = defaultBehavior ? times(1) : never();
-    verify(fs, mode).setErasureCodingPolicy(eq(path),
-        eq(replicationPolicyName));
-  }
-
   private void runTmpResourcePathTest(JobResourceUploader uploader,
       ResourceConf rConf, JobConf jConf, String[] expectedFiles,
       String[] expectedArchives, String expectedJobJar) throws IOException {
@@ -740,10 +698,6 @@ public class TestJobResourceUploader {
       super(FileSystem.getLocal(conf), useWildcard);
     }
 
-    StubedUploader(FileSystem fs, boolean useWildcard) throws IOException {
-      super(fs, useWildcard);
-    }
-
     @Override
     FileStatus getFileStatus(Map<URI, FileStatus> statCache, Configuration job,
         Path p) throws IOException {




[48/50] hadoop git commit: YARN-7034. DefaultLinuxContainerRuntime and DockerLinuxContainerRuntime send client environment variables to container-executor. Contributed by Miklos Szegedi.

Posted by as...@apache.org.
YARN-7034. DefaultLinuxContainerRuntime and DockerLinuxContainerRuntime send client environment variables to container-executor. Contributed by Miklos Szegedi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5e1851d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5e1851d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5e1851d

Branch: refs/heads/YARN-6592
Commit: e5e1851d803bf8d8b96fec1b5c0058014e9329d0
Parents: a92ef03
Author: Junping Du <ju...@apache.org>
Authored: Thu Sep 21 14:01:16 2017 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Thu Sep 21 14:01:16 2017 -0700

----------------------------------------------------------------------
 .../privileged/PrivilegedOperationExecutor.java |  2 +-
 .../runtime/DefaultLinuxContainerRuntime.java   |  8 +--
 .../runtime/DockerLinuxContainerRuntime.java    |  9 +--
 .../TestLinuxContainerExecutorWithMocks.java    | 67 +++++++++++++++++++-
 .../runtime/TestDockerContainerRuntime.java     |  3 +-
 5 files changed, 73 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5e1851d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
index 5a3ce74..9f13a5e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
@@ -204,7 +204,7 @@ public class PrivilegedOperationExecutor {
   public String executePrivilegedOperation(PrivilegedOperation operation,
       boolean grabOutput) throws PrivilegedOperationException {
     return executePrivilegedOperation(null, operation, null, null, grabOutput,
-        true);
+        false);
   }
 
   //Utility functions for squashing together operations in supported ways

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5e1851d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
index d09e4a1..6c3ae85 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
@@ -80,7 +80,6 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
   @Override
   public void launchContainer(ContainerRuntimeContext ctx)
       throws ContainerExecutionException {
-    Container container = ctx.getContainer();
     PrivilegedOperation launchOp = new PrivilegedOperation(
         PrivilegedOperation.OperationType.LAUNCH_CONTAINER);
 
@@ -116,8 +115,7 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
 
     try {
       privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
-            launchOp, null, container.getLaunchContext().getEnvironment(),
-            false, false);
+            launchOp, null, null, false, false);
     } catch (PrivilegedOperationException e) {
       LOG.warn("Launch container failed. Exception: ", e);
 
@@ -129,7 +127,6 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
   @Override
   public void signalContainer(ContainerRuntimeContext ctx)
       throws ContainerExecutionException {
-    Container container = ctx.getContainer();
     PrivilegedOperation signalOp = new PrivilegedOperation(
         PrivilegedOperation.OperationType.SIGNAL_CONTAINER);
 
@@ -148,8 +145,7 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
           .getInstance(conf);
 
       executor.executePrivilegedOperation(null,
-          signalOp, null, container.getLaunchContext().getEnvironment(),
-          false, true);
+          signalOp, null, null, false, false);
     } catch (PrivilegedOperationException e) {
       //Don't log the failure here. Some kinds of signaling failures are
       // acceptable. Let the calling executor decide what to do.
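
A note for readers skimming the hunks in this patch: the runtimes stop forwarding the container launch-context environment to the container-executor, passing null for the clientEnv argument and false for the trailing boolean flags, and the two-argument convenience overload now defaults its final flag to false. Below is a minimal, illustrative sketch of the resulting call pattern, built only from names visible in this diff; the wrapper class is an assumption for illustration and is not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;

// Illustrative wrapper only; mirrors the post-change call sites above.
class SignalCallSketch {
  static void signal(Configuration conf) throws PrivilegedOperationException {
    PrivilegedOperationExecutor executor =
        PrivilegedOperationExecutor.getInstance(conf);
    PrivilegedOperation signalOp = new PrivilegedOperation(
        PrivilegedOperation.OperationType.SIGNAL_CONTAINER);
    // Arguments: prefixCommands, operation, workingDir, clientEnv,
    // grabOutput, and a final boolean flag. After this patch clientEnv is
    // null and both booleans are false, matching the hunks in this commit.
    executor.executePrivilegedOperation(null, signalOp, null, null, false,
        false);
  }
}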

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5e1851d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 1ccd365..6491a99 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -549,8 +549,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 
     try {
       privilegedOperationExecutor.executePrivilegedOperation(null,
-          launchOp, null, container.getLaunchContext().getEnvironment(),
-          false, false);
+          launchOp, null, null, false, false);
     } catch (PrivilegedOperationException e) {
       LOG.warn("Launch container failed. Exception: ", e);
       LOG.info("Docker command used: " + runCommand.getCommandWithArguments());
@@ -563,7 +562,6 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   @Override
   public void signalContainer(ContainerRuntimeContext ctx)
       throws ContainerExecutionException {
-    Container container = ctx.getContainer();
     ContainerExecutor.Signal signal = ctx.getExecutionAttribute(SIGNAL);
 
     PrivilegedOperation privOp = null;
@@ -594,8 +592,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 
     try {
       privilegedOperationExecutor.executePrivilegedOperation(null,
-          privOp, null, container.getLaunchContext().getEnvironment(),
-          false, false);
+          privOp, null, null, false, false);
     } catch (PrivilegedOperationException e) {
       throw new ContainerExecutionException("Signal container failed", e
           .getExitCode(), e.getOutput(), e.getErrorOutput());
@@ -623,7 +620,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       privOp.appendArgs(commandFile);
       String output = privilegedOperationExecutor
           .executePrivilegedOperation(null, privOp, null,
-              container.getLaunchContext().getEnvironment(), true, false);
+              null, true, false);
       LOG.info("Docker inspect output for " + containerId + ": " + output);
       int index = output.lastIndexOf(',');
       if (index == -1) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5e1851d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
index 6ca96db..9fa22d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
@@ -72,10 +72,16 @@ import org.junit.Assert;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
+import static org.mockito.Matchers.anyListOf;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 
 public class TestLinuxContainerExecutorWithMocks {
 
@@ -91,6 +97,8 @@ public class TestLinuxContainerExecutorWithMocks {
 
   private String tmpMockExecutor;
   private LinuxContainerExecutor mockExec = null;
+  private LinuxContainerExecutor mockExecMockRuntime = null;
+  private PrivilegedOperationExecutor mockPrivilegedExec;
   private final File mockParamFile = new File("./params.txt");
   private LocalDirsHandlerService dirsHandler;
   
@@ -140,22 +148,28 @@ public class TestLinuxContainerExecutorWithMocks {
 
     Configuration conf = new YarnConfiguration();
     LinuxContainerRuntime linuxContainerRuntime;
+    LinuxContainerRuntime mockLinuxContainerRuntime;
 
     setupMockExecutor(MOCK_EXECUTOR, conf);
     linuxContainerRuntime = new DefaultLinuxContainerRuntime(
         PrivilegedOperationExecutor.getInstance(conf));
+    mockPrivilegedExec = Mockito.mock(PrivilegedOperationExecutor.class);
+    mockLinuxContainerRuntime = new DefaultLinuxContainerRuntime(
+        mockPrivilegedExec);
     dirsHandler = new LocalDirsHandlerService();
     dirsHandler.init(conf);
     linuxContainerRuntime.initialize(conf);
     mockExec = new LinuxContainerExecutor(linuxContainerRuntime);
     mockExec.setConf(conf);
+    mockExecMockRuntime = new LinuxContainerExecutor(mockLinuxContainerRuntime);
+    mockExecMockRuntime.setConf(conf);
   }
 
   @After
   public void tearDown() {
     deleteMockParamFile();
   }
-  
+
   @Test
   public void testContainerLaunch()
       throws IOException, ConfigurationException {
@@ -168,7 +182,7 @@ public class TestLinuxContainerExecutorWithMocks {
     ContainerId cId = mock(ContainerId.class);
     ContainerLaunchContext context = mock(ContainerLaunchContext.class);
     HashMap<String, String> env = new HashMap<String,String>();
-    
+
     when(container.getContainerId()).thenReturn(cId);
     when(container.getLaunchContext()).thenReturn(context);
     
@@ -605,4 +619,53 @@ public class TestLinuxContainerExecutorWithMocks {
           e.getMessage().contains("exit code"));
     }
   }
+
+  @Test
+  public void testContainerLaunchEnvironment()
+      throws IOException, ConfigurationException,
+      PrivilegedOperationException {
+    String appSubmitter = "nobody";
+    String appId = "APP_ID";
+    String containerId = "CONTAINER_ID";
+    Container container = mock(Container.class);
+    ContainerId cId = mock(ContainerId.class);
+    ContainerLaunchContext context = mock(ContainerLaunchContext.class);
+    HashMap<String, String> env = new HashMap<String, String>();
+    env.put("FROM_CLIENT", "1");
+
+    when(container.getContainerId()).thenReturn(cId);
+    when(container.getLaunchContext()).thenReturn(context);
+
+    when(cId.toString()).thenReturn(containerId);
+
+    when(context.getEnvironment()).thenReturn(env);
+
+    Path scriptPath = new Path("file:///bin/echo");
+    Path tokensPath = new Path("file:///dev/null");
+    Path workDir = new Path("/tmp");
+    Path pidFile = new Path(workDir, "pid.txt");
+
+    mockExecMockRuntime.activateContainer(cId, pidFile);
+    mockExecMockRuntime.launchContainer(new ContainerStartContext.Builder()
+        .setContainer(container)
+        .setNmPrivateContainerScriptPath(scriptPath)
+        .setNmPrivateTokensPath(tokensPath)
+        .setUser(appSubmitter)
+        .setAppId(appId)
+        .setContainerWorkDir(workDir)
+        .setLocalDirs(dirsHandler.getLocalDirs())
+        .setLogDirs(dirsHandler.getLogDirs())
+        .setFilecacheDirs(new ArrayList<>())
+        .setUserLocalDirs(new ArrayList<>())
+        .setContainerLocalDirs(new ArrayList<>())
+        .setContainerLogDirs(new ArrayList<>())
+        .build());
+    ArgumentCaptor<PrivilegedOperation> opCaptor = ArgumentCaptor.forClass(
+        PrivilegedOperation.class);
+    // Verify that the client env is not forwarded (clientEnv arg is null).
+    verify(mockPrivilegedExec, times(1))
+        .executePrivilegedOperation(anyListOf(
+            String.class), opCaptor.capture(), any(
+            File.class), eq(null), eq(false), eq(false));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5e1851d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index 3f4bb5c..0e314bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -116,6 +116,7 @@ public class TestDockerContainerRuntime {
     cId = mock(ContainerId.class);
     context = mock(ContainerLaunchContext.class);
     env = new HashMap<String, String>();
+    env.put("FROM_CLIENT", "1");
     image = "busybox:latest";
 
     env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_IMAGE, image);
@@ -204,7 +205,7 @@ public class TestDockerContainerRuntime {
     // warning annotation on the entire method
     verify(mockExecutor, times(1))
         .executePrivilegedOperation(anyList(), opCaptor.capture(), any(
-            File.class), any(Map.class), eq(false), eq(false));
+            File.class), eq(null), eq(false), eq(false));
 
     //verification completed. we need to isolate specific invocations.
     // hence, reset mock here


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[38/50] hadoop git commit: HDFS-12447. Rename AddECPolicyResponse to AddErasureCodingPolicyResponse. Contributed by SammiChen.

Posted by as...@apache.org.
HDFS-12447. Rename AddECPolicyResponse to AddErasureCodingPolicyResponse. Contributed by SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a12f09ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a12f09ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a12f09ba

Branch: refs/heads/YARN-6592
Commit: a12f09ba3c4a3aa4c4558090c5e1b7bcaebe3b94
Parents: ce943eb
Author: Andrew Wang <wa...@apache.org>
Authored: Wed Sep 20 11:51:17 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Wed Sep 20 11:51:17 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 10 +--
 .../hadoop/hdfs/DistributedFileSystem.java      |  4 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    |  4 +-
 .../hdfs/protocol/AddECPolicyResponse.java      | 68 --------------------
 .../AddErasureCodingPolicyResponse.java         | 68 ++++++++++++++++++++
 .../hadoop/hdfs/protocol/ClientProtocol.java    |  2 +-
 .../ClientNamenodeProtocolTranslatorPB.java     | 11 ++--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 22 ++++---
 .../src/main/proto/erasurecoding.proto          |  2 +-
 .../src/main/proto/hdfs.proto                   |  2 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java | 13 ++--
 .../hdfs/server/namenode/FSNamesystem.java      | 15 +++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  9 +--
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  7 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |  4 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  7 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java    | 25 +++----
 .../server/namenode/TestFSEditLogLoader.java    |  4 +-
 .../hdfs/server/namenode/TestFSImage.java       |  5 +-
 19 files changed, 147 insertions(+), 135 deletions(-)
----------------------------------------------------------------------
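
For context on the rename, here is a minimal client-side sketch of the API as it looks after this patch, built only from the signatures visible in the hunks below. The RS_3_2_SCHEMA and 8 * 1024 cell size mirror the test changes further down and are illustrative rather than prescriptive; the driver class itself is an assumption, not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;

// Illustrative driver; assumes fs.defaultFS points at an HDFS cluster.
public class AddEcPoliciesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    ErasureCodingPolicy newPolicy =
        new ErasureCodingPolicy(ErasureCodeConstants.RS_3_2_SCHEMA, 8 * 1024);
    AddErasureCodingPolicyResponse[] responses =
        dfs.addErasureCodingPolicies(new ErasureCodingPolicy[] {newPolicy});
    for (AddErasureCodingPolicyResponse response : responses) {
      // isSucceed()/getPolicy()/getErrorMsg() come from the renamed class.
      if (response.isSucceed()) {
        System.out.println("Added policy " + response.getPolicy().getName());
      } else {
        System.out.println("Add failed: " + response.getErrorMsg());
      }
    }
  }
}

The ECAdmin change in this patch prints the same response objects on the CLI path after loading policies from a file.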


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7e8e95b..8d51a9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -102,7 +102,7 @@ import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -2807,13 +2807,14 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
-  public AddECPolicyResponse[] addErasureCodingPolicies(
+  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
       ErasureCodingPolicy[] policies) throws IOException {
     checkOpen();
     try (TraceScope ignored = tracer.newScope("addErasureCodingPolicies")) {
       return namenode.addErasureCodingPolicies(policies);
     } catch (RemoteException re) {
-      throw re.unwrapRemoteException(AccessControlException.class);
+      throw re.unwrapRemoteException(AccessControlException.class,
+          SafeModeException.class);
     }
   }
 
@@ -2823,7 +2824,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     try (TraceScope ignored = tracer.newScope("removeErasureCodingPolicy")) {
       namenode.removeErasureCodingPolicy(ecPolicyName);
     } catch (RemoteException re) {
-      throw re.unwrapRemoteException(AccessControlException.class);
+      throw re.unwrapRemoteException(AccessControlException.class,
+          SafeModeException.class);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index f6331cf..c9f4490 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -2650,7 +2650,7 @@ public class DistributedFileSystem extends FileSystem {
    * @return Return the response list of adding operations.
    * @throws IOException
    */
-  public AddECPolicyResponse[] addErasureCodingPolicies(
+  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
       ErasureCodingPolicy[] policies)  throws IOException {
     return dfs.addErasureCodingPolicies(policies);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index bfc6010..8dc5810 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
@@ -562,7 +562,7 @@ public class HdfsAdmin {
    * @return Return the response list of adding operations.
    * @throws IOException
    */
-  public AddECPolicyResponse[] addErasureCodingPolicies(
+  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
       ErasureCodingPolicy[] policies)  throws IOException {
     return dfs.addErasureCodingPolicies(policies);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddECPolicyResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddECPolicyResponse.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddECPolicyResponse.java
deleted file mode 100644
index 4e55680..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddECPolicyResponse.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.HadoopIllegalArgumentException;
-
-/**
- * A response of add an ErasureCoding policy.
- */
-public class AddECPolicyResponse {
-  private boolean succeed;
-  private ErasureCodingPolicy policy;
-  private String errorMsg;
-
-  public AddECPolicyResponse(ErasureCodingPolicy policy) {
-    this.policy = policy;
-    this.succeed = true;
-  }
-
-  public AddECPolicyResponse(ErasureCodingPolicy policy,
-      String errorMsg) {
-    this.policy = policy;
-    this.errorMsg = errorMsg;
-    this.succeed = false;
-  }
-
-  public AddECPolicyResponse(ErasureCodingPolicy policy,
-      HadoopIllegalArgumentException e) {
-    this(policy, e.getMessage());
-  }
-
-  public boolean isSucceed() {
-    return succeed;
-  }
-
-  public ErasureCodingPolicy getPolicy() {
-    return policy;
-  }
-
-  public String getErrorMsg() {
-    return errorMsg;
-  }
-
-  @Override
-  public String toString() {
-    if (isSucceed()) {
-      return "Add ErasureCodingPolicy " + getPolicy().getName() + " succeed.";
-    } else {
-      return "Add ErasureCodingPolicy " + getPolicy().getName() + " failed and "
-          + "error message is " + getErrorMsg();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java
new file mode 100644
index 0000000..2e8d081
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+
+/**
+ * A response to adding an ErasureCoding policy.
+ */
+public class AddErasureCodingPolicyResponse {
+  private boolean succeed;
+  private ErasureCodingPolicy policy;
+  private String errorMsg;
+
+  public AddErasureCodingPolicyResponse(ErasureCodingPolicy policy) {
+    this.policy = policy;
+    this.succeed = true;
+  }
+
+  public AddErasureCodingPolicyResponse(ErasureCodingPolicy policy,
+                                        String errorMsg) {
+    this.policy = policy;
+    this.errorMsg = errorMsg;
+    this.succeed = false;
+  }
+
+  public AddErasureCodingPolicyResponse(ErasureCodingPolicy policy,
+                                        HadoopIllegalArgumentException e) {
+    this(policy, e.getMessage());
+  }
+
+  public boolean isSucceed() {
+    return succeed;
+  }
+
+  public ErasureCodingPolicy getPolicy() {
+    return policy;
+  }
+
+  public String getErrorMsg() {
+    return errorMsg;
+  }
+
+  @Override
+  public String toString() {
+    if (isSucceed()) {
+      return "Add ErasureCodingPolicy " + getPolicy().getName() + " succeed.";
+    } else {
+      return "Add ErasureCodingPolicy " + getPolicy().getName() + " failed and "
+          + "error message is " + getErrorMsg();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 8d5503f..25f63bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1584,7 +1584,7 @@ public interface ClientProtocol {
    * @throws IOException
    */
   @AtMostOnce
-  AddECPolicyResponse[] addErasureCodingPolicies(
+  AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
       ErasureCodingPolicy[] policies) throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 209eee7..0b443c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -50,7 +50,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -1718,7 +1718,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public AddECPolicyResponse[] addErasureCodingPolicies(
+  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
       ErasureCodingPolicy[] policies) throws IOException {
     List<ErasureCodingPolicyProto> protos = Arrays.stream(policies)
         .map(PBHelperClient::convertErasureCodingPolicy)
@@ -1729,9 +1729,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
     try {
       AddErasureCodingPoliciesResponseProto rep = rpcProxy
           .addErasureCodingPolicies(null, req);
-      AddECPolicyResponse[] responses = rep.getResponsesList().stream()
-          .map(PBHelperClient::convertAddECPolicyResponse)
-          .toArray(AddECPolicyResponse[]::new);
+      AddErasureCodingPolicyResponse[] responses =
+          rep.getResponsesList().stream()
+              .map(PBHelperClient::convertAddErasureCodingPolicyResponse)
+              .toArray(AddErasureCodingPolicyResponse[]::new);
       return responses;
     }  catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index d92d91e..db97031 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -58,7 +58,7 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.inotify.Event;
 import org.apache.hadoop.hdfs.inotify.EventBatch;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.BlockType;
@@ -137,7 +137,7 @@ import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptionS
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ZoneReencryptionStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddECPolicyResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto;
@@ -2981,10 +2981,11 @@ public class PBHelperClient {
     return builder.build();
   }
 
-  public static AddECPolicyResponseProto convertAddECPolicyResponse(
-      AddECPolicyResponse response) {
-    AddECPolicyResponseProto.Builder builder =
-        AddECPolicyResponseProto.newBuilder()
+  public static AddErasureCodingPolicyResponseProto
+      convertAddErasureCodingPolicyResponse(
+          AddErasureCodingPolicyResponse response) {
+    AddErasureCodingPolicyResponseProto.Builder builder =
+        AddErasureCodingPolicyResponseProto.newBuilder()
         .setPolicy(convertErasureCodingPolicy(response.getPolicy()))
         .setSucceed(response.isSucceed());
     if (!response.isSucceed()) {
@@ -2993,13 +2994,14 @@ public class PBHelperClient {
     return builder.build();
   }
 
-  public static AddECPolicyResponse convertAddECPolicyResponse(
-      AddECPolicyResponseProto proto) {
+  public static AddErasureCodingPolicyResponse
+      convertAddErasureCodingPolicyResponse(
+          AddErasureCodingPolicyResponseProto proto) {
     ErasureCodingPolicy policy = convertErasureCodingPolicy(proto.getPolicy());
     if (proto.getSucceed()) {
-      return new AddECPolicyResponse(policy);
+      return new AddErasureCodingPolicyResponse(policy);
     } else {
-      return new AddECPolicyResponse(policy, proto.getErrorMsg());
+      return new AddErasureCodingPolicyResponse(policy, proto.getErrorMsg());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
index 9f80350..5f5f66e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
@@ -58,7 +58,7 @@ message AddErasureCodingPoliciesRequestProto {
 }
 
 message AddErasureCodingPoliciesResponseProto {
-  repeated AddECPolicyResponseProto responses = 1;
+  repeated AddErasureCodingPolicyResponseProto responses = 1;
 }
 
 message RemoveErasureCodingPolicyRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index ddb5566..7769f68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -390,7 +390,7 @@ message ErasureCodingPolicyProto {
   optional ErasureCodingPolicyState state = 5 [default = ENABLED];
 }
 
-message AddECPolicyResponseProto {
+message AddErasureCodingPolicyResponseProto {
   required ErasureCodingPolicyProto policy = 1;
   required bool succeed = 2;
   optional string errorMsg = 3;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index a79e75f..0527b3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.QuotaUsage;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -1721,15 +1721,16 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       ErasureCodingPolicy[] policies = request.getEcPoliciesList().stream()
           .map(PBHelperClient::convertErasureCodingPolicy)
           .toArray(ErasureCodingPolicy[]::new);
-      AddECPolicyResponse[] result = server
+      AddErasureCodingPolicyResponse[] result = server
           .addErasureCodingPolicies(policies);
 
-      List<HdfsProtos.AddECPolicyResponseProto> responseProtos = Arrays
-          .stream(result).map(PBHelperClient::convertAddECPolicyResponse)
-          .collect(Collectors.toList());
+      List<HdfsProtos.AddErasureCodingPolicyResponseProto> responseProtos =
+          Arrays.stream(result)
+              .map(PBHelperClient::convertAddErasureCodingPolicyResponse)
+              .collect(Collectors.toList());
       AddErasureCodingPoliciesResponseProto response =
           AddErasureCodingPoliciesResponseProto.newBuilder()
-            .addAllResponses(responseProtos).build();
+              .addAllResponses(responseProtos).build();
       return response;
     } catch (IOException e) {
       throw new ServiceException(e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 029557f..967f26b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -180,7 +180,7 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockType;
@@ -7193,12 +7193,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    *                      rebuilding
    * @return The according result of add operation.
    */
-  AddECPolicyResponse[] addErasureCodingPolicies(ErasureCodingPolicy[] policies,
-      final boolean logRetryCache) throws IOException {
+  AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
+      ErasureCodingPolicy[] policies, final boolean logRetryCache)
+      throws IOException {
     final String operationName = "addErasureCodingPolicies";
     String addECPolicyName = "";
     checkOperation(OperationCategory.WRITE);
-    List<AddECPolicyResponse> responses = new ArrayList<>();
+    List<AddErasureCodingPolicyResponse> responses = new ArrayList<>();
     boolean success = false;
     writeLock();
     try {
@@ -7210,13 +7211,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
               FSDirErasureCodingOp.addErasureCodingPolicy(this, policy,
                   logRetryCache);
           addECPolicyName = newPolicy.getName();
-          responses.add(new AddECPolicyResponse(newPolicy));
+          responses.add(new AddErasureCodingPolicyResponse(newPolicy));
         } catch (HadoopIllegalArgumentException e) {
-          responses.add(new AddECPolicyResponse(policy, e));
+          responses.add(new AddErasureCodingPolicyResponse(policy, e));
         }
       }
       success = true;
-      return responses.toArray(new AddECPolicyResponse[0]);
+      return responses.toArray(new AddErasureCodingPolicyResponse[0]);
     } finally {
       writeUnlock(operationName);
       if (success) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index d62c0f2..c07b521 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -85,7 +85,7 @@ import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.inotify.EventBatch;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -2333,17 +2333,18 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override
-  public AddECPolicyResponse[] addErasureCodingPolicies(
+  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
       ErasureCodingPolicy[] policies) throws IOException {
     checkNNStartup();
     namesystem.checkSuperuserPrivilege();
     final CacheEntryWithPayload cacheEntry =
         RetryCache.waitForCompletion(retryCache, null);
     if (cacheEntry != null && cacheEntry.isSuccess()) {
-      return (AddECPolicyResponse[]) cacheEntry.getPayload();
+      return (AddErasureCodingPolicyResponse[]) cacheEntry.getPayload();
     }
     boolean success = false;
-    AddECPolicyResponse[] responses = new AddECPolicyResponse[0];
+    AddErasureCodingPolicyResponse[] responses =
+        new AddErasureCodingPolicyResponse[0];
     try {
       responses =
           namesystem.addErasureCodingPolicies(policies, cacheEntry != null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 0b4e0c2..652f78a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.util.ECPolicyLoader;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
@@ -175,9 +175,10 @@ public class ECAdmin extends Configured implements Tool {
         List<ErasureCodingPolicy> policies =
             new ECPolicyLoader().loadPolicy(filePath);
         if (policies.size() > 0) {
-          AddECPolicyResponse[] responses = dfs.addErasureCodingPolicies(
+          AddErasureCodingPolicyResponse[] responses =
+              dfs.addErasureCodingPolicies(
             policies.toArray(new ErasureCodingPolicy[policies.size()]));
-          for (AddECPolicyResponse response : responses) {
+          for (AddErasureCodingPolicyResponse response : responses) {
             System.out.println(response);
           }
         } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index c6fe1a2..17db352 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -108,7 +108,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
@@ -1472,7 +1472,7 @@ public class DFSTestUtil {
     ErasureCodingPolicy newPolicy1 =
         new ErasureCodingPolicy(ErasureCodeConstants.RS_3_2_SCHEMA, 8 * 1024);
     ErasureCodingPolicy[] policyArray = new ErasureCodingPolicy[] {newPolicy1};
-    AddECPolicyResponse[] responses =
+    AddErasureCodingPolicyResponse[] responses =
         filesystem.addErasureCodingPolicies(policyArray);
     newPolicy1 = responses[0].getPolicy();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 4f2040b..2c2b05e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -686,7 +686,7 @@ public class TestErasureCodingPolicies {
     ErasureCodingPolicy newPolicy =
         new ErasureCodingPolicy(toAddSchema, 128 * 1024);
     ErasureCodingPolicy[] policyArray = new ErasureCodingPolicy[]{newPolicy};
-    AddECPolicyResponse[] responses =
+    AddErasureCodingPolicyResponse[] responses =
         fs.addErasureCodingPolicies(policyArray);
     assertEquals(1, responses.length);
     assertFalse(responses[0].isSucceed());
@@ -839,7 +839,8 @@ public class TestErasureCodingPolicies {
         new ErasureCodingPolicy(ErasureCodeConstants.RS_3_2_SCHEMA, 8 * 1024);
     ErasureCodingPolicy[] policyArray =
         new ErasureCodingPolicy[] {newPolicy1};
-    AddECPolicyResponse[] responses = fs.addErasureCodingPolicies(policyArray);
+    AddErasureCodingPolicyResponse[] responses =
+        fs.addErasureCodingPolicies(policyArray);
     assertEquals(1, responses.length);
     assertTrue(responses[0].isSucceed());
     newPolicy1 = responses[0].getPolicy();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index 3559825..da56c15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.protocolPB;
 
 
 import com.google.protobuf.UninitializedMessageException;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 
@@ -913,14 +913,15 @@ public class TestPBHelper {
     // Check conversion of the built-in policies.
     for (ErasureCodingPolicy policy :
         SystemErasureCodingPolicies.getPolicies()) {
-      AddECPolicyResponse response = new AddECPolicyResponse(policy);
-      HdfsProtos.AddECPolicyResponseProto proto = PBHelperClient
-          .convertAddECPolicyResponse(response);
+      AddErasureCodingPolicyResponse response =
+          new AddErasureCodingPolicyResponse(policy);
+      HdfsProtos.AddErasureCodingPolicyResponseProto proto = PBHelperClient
+          .convertAddErasureCodingPolicyResponse(response);
       // Optional fields should not be set.
       assertFalse("Unnecessary field is set.", proto.hasErrorMsg());
       // Convert proto back to an object and check for equality.
-      AddECPolicyResponse convertedResponse = PBHelperClient
-          .convertAddECPolicyResponse(proto);
+      AddErasureCodingPolicyResponse convertedResponse = PBHelperClient
+          .convertAddErasureCodingPolicyResponse(proto);
       assertEquals("Converted policy not equal", response.getPolicy(),
           convertedResponse.getPolicy());
       assertEquals("Converted policy not equal", response.isSucceed(),
@@ -929,13 +930,13 @@ public class TestPBHelper {
 
     ErasureCodingPolicy policy = SystemErasureCodingPolicies
         .getPolicies().get(0);
-    AddECPolicyResponse response =
-        new AddECPolicyResponse(policy, "failed");
-    HdfsProtos.AddECPolicyResponseProto proto = PBHelperClient
-        .convertAddECPolicyResponse(response);
+    AddErasureCodingPolicyResponse response =
+        new AddErasureCodingPolicyResponse(policy, "failed");
+    HdfsProtos.AddErasureCodingPolicyResponseProto proto = PBHelperClient
+        .convertAddErasureCodingPolicyResponse(response);
     // Convert proto back to an object and check for equality.
-    AddECPolicyResponse convertedResponse = PBHelperClient
-        .convertAddECPolicyResponse(proto);
+    AddErasureCodingPolicyResponse convertedResponse = PBHelperClient
+        .convertAddErasureCodingPolicyResponse(proto);
     assertEquals("Converted policy not equal", response.getPolicy(),
         convertedResponse.getPolicy());
     assertEquals("Converted policy not equal", response.getErrorMsg(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index ec80bff..14d2e26 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
@@ -735,7 +735,7 @@ public class TestFSEditLogLoader {
       ErasureCodingPolicy newPolicy =
           new ErasureCodingPolicy(schema, cellSize, (byte) 0);
       ErasureCodingPolicy[] policyArray = new ErasureCodingPolicy[]{newPolicy};
-      AddECPolicyResponse[] responses =
+      AddErasureCodingPolicyResponse[] responses =
           fs.addErasureCodingPolicies(policyArray);
       assertEquals(1, responses.length);
       assertTrue(responses[0].isSucceed());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index c9d3255..d316c36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -35,7 +35,7 @@ import java.io.IOException;
 import java.util.EnumSet;
 
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
@@ -847,7 +847,8 @@ public class TestFSImage {
       ErasureCodingPolicy newPolicy =
           new ErasureCodingPolicy(newSchema, 2 * 1024, (byte) 254);
       ErasureCodingPolicy[] policies = new ErasureCodingPolicy[]{newPolicy};
-      AddECPolicyResponse[] ret = fs.addErasureCodingPolicies(policies);
+      AddErasureCodingPolicyResponse[] ret =
+          fs.addErasureCodingPolicies(policies);
       assertEquals(1, ret.length);
       assertEquals(true, ret[0].isSucceed());
       newPolicy = ret[0].getPolicy();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[14/50] hadoop git commit: MAPREDUCE-6954. Disable erasure coding for files that are uploaded to the MR staging area (pbacsko via rkanter)

Posted by as...@apache.org.
MAPREDUCE-6954. Disable erasure coding for files that are uploaded to the MR staging area (pbacsko via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0adc0471
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0adc0471
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0adc0471

Branch: refs/heads/YARN-6592
Commit: 0adc0471d0c06f66a31060f270dcb50a7b4ffafa
Parents: 5f49668
Author: Robert Kanter <rk...@apache.org>
Authored: Mon Sep 18 10:40:06 2017 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Mon Sep 18 10:40:06 2017 -0700

----------------------------------------------------------------------
 .../hadoop-mapreduce-client-core/pom.xml        |  4 ++
 .../hadoop/mapreduce/JobResourceUploader.java   | 17 ++++++++
 .../apache/hadoop/mapreduce/MRJobConfig.java    |  5 +++
 .../src/main/resources/mapred-default.xml       |  9 ++++
 .../mapreduce/TestJobResourceUploader.java      | 46 ++++++++++++++++++++
 5 files changed, 81 insertions(+)
----------------------------------------------------------------------
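
As a quick illustration of what disableErasureCodingForPath does in the JobResourceUploader change below, here is a minimal standalone sketch that uses only the calls visible in the hunk. The staging-dir path and the wrapper class are illustrative assumptions, not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;

// Illustrative only; mirrors JobResourceUploader#disableErasureCodingForPath.
public class DisableEcForStagingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path stagingDir = new Path("/tmp/hadoop-yarn/staging/example"); // illustrative
    if (fs instanceof DistributedFileSystem) {
      // Pin the directory to the replication "policy" so newly uploaded job
      // resources are replicated rather than erasure coded.
      ((DistributedFileSystem) fs).setErasureCodingPolicy(stagingDir,
          SystemErasureCodingPolicies.getReplicationPolicy().getName());
    }
  }
}

In the patch itself this call is gated on MRJobConfig.MR_AM_STAGING_DIR_ERASURECODING_ENABLED, so operators can keep erasure coding enabled for the staging area if they prefer it.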


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0adc0471/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index c34f7bd..ce5fdc8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -44,6 +44,10 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <scope>test</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0adc0471/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index f1cad57..d9bf988 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -36,6 +36,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.mapreduce.filecache.ClientDistributedCacheManager;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 
@@ -94,6 +96,11 @@ class JobResourceUploader {
         new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
     mkdirs(jtFs, submitJobDir, mapredSysPerms);
 
+    if (!conf.getBoolean(MRJobConfig.MR_AM_STAGING_DIR_ERASURECODING_ENABLED,
+        MRJobConfig.DEFAULT_MR_AM_STAGING_ERASURECODING_ENABLED)) {
+      disableErasureCodingForPath(jtFs, submitJobDir);
+    }
+
     Collection<String> files = conf.getStringCollection("tmpfiles");
     Collection<String> libjars = conf.getStringCollection("tmpjars");
     Collection<String> archives = conf.getStringCollection("tmparchives");
@@ -575,4 +582,14 @@ class JobResourceUploader {
     }
     return finalPath;
   }
+
+  private void disableErasureCodingForPath(FileSystem fs, Path path)
+      throws IOException {
+    if (jtFs instanceof DistributedFileSystem) {
+      LOG.info("Disabling Erasure Coding for path: " + path);
+      DistributedFileSystem dfs = (DistributedFileSystem) jtFs;
+      dfs.setErasureCodingPolicy(path,
+          SystemErasureCodingPolicies.getReplicationPolicy().getName());
+    }
+  }
 }
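
As a rough, hedged illustration of the job-level switch this introduces (not part of the commit; the key name matches the MRJobConfig and mapred-default changes below, the job name is made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    // Opt back in to erasure coding for the MR staging area of this job.
    Configuration conf = new Configuration();
    conf.setBoolean(
        "yarn.app.mapreduce.am.staging-dir.erasurecoding.enabled", true);
    Job job = Job.getInstance(conf, "staging-ec-demo");
    // ... configure mapper/reducer/paths and submit; JobResourceUploader will
    // then skip the setErasureCodingPolicy(...) call shown in the hunk above.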

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0adc0471/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 2023ba3..86abb42 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -1037,4 +1037,9 @@ public interface MRJobConfig {
   String FINISH_JOB_WHEN_REDUCERS_DONE =
       "mapreduce.job.finish-when-all-reducers-done";
   boolean DEFAULT_FINISH_JOB_WHEN_REDUCERS_DONE = true;
+
+  String MR_AM_STAGING_DIR_ERASURECODING_ENABLED =
+      MR_AM_STAGING_DIR + "erasurecoding.enabled";
+
+  boolean DEFAULT_MR_AM_STAGING_ERASURECODING_ENABLED = false;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0adc0471/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index ee9b906..6b6faf2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1261,6 +1261,15 @@
 </property>
 
 <property>
+  <name>yarn.app.mapreduce.am.staging-dir.erasurecoding.enabled</name>
+  <value>false</value>
+  <description>Whether Erasure Coding should be enabled for
+  files that are copied to the MR staging area. This is a job-level
+  setting.
+  </description>
+</property>
+
+<property>
   <name>mapreduce.am.max-attempts</name>
   <value>2</value>
   <description>The maximum number of application attempts. It is a

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0adc0471/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
index 20b7b7d..d0d7a34 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
@@ -20,6 +20,11 @@ package org.apache.hadoop.mapreduce;
 
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.times;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
 
 import java.io.IOException;
 import java.net.URI;
@@ -36,9 +41,12 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.mapred.JobConf;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.verification.VerificationMode;
 
 /**
  * A class for unit testing JobResourceUploader.
@@ -357,6 +365,40 @@ public class TestJobResourceUploader {
         expectedArchivesWithFrags, expectedJobJar);
   }
 
+  @Test
+  public void testErasureCodingDefault() throws IOException {
+    testErasureCodingSetting(true);
+  }
+
+  @Test
+  public void testErasureCodingDisabled() throws IOException {
+    testErasureCodingSetting(false);
+  }
+
+  private void testErasureCodingSetting(boolean defaultBehavior)
+      throws IOException {
+    JobConf jConf = new JobConf();
+    // leave the property unset when testing the default (EC disabled) case
+    if (!defaultBehavior) {
+      jConf.setBoolean(MRJobConfig.MR_AM_STAGING_DIR_ERASURECODING_ENABLED,
+          true);
+    }
+
+    DistributedFileSystem fs = mock(DistributedFileSystem.class);
+    Path path = new Path("/");
+    when(fs.makeQualified(any(Path.class))).thenReturn(path);
+    JobResourceUploader uploader = new StubedUploader(fs, true);
+    Job job = Job.getInstance(jConf);
+
+    uploader.uploadResources(job, new Path("/test"));
+
+    String replicationPolicyName = SystemErasureCodingPolicies
+        .getReplicationPolicy().getName();
+    VerificationMode mode = defaultBehavior ? times(1) : never();
+    verify(fs, mode).setErasureCodingPolicy(eq(path),
+        eq(replicationPolicyName));
+  }
+
   private void runTmpResourcePathTest(JobResourceUploader uploader,
       ResourceConf rConf, JobConf jConf, String[] expectedFiles,
       String[] expectedArchives, String expectedJobJar) throws IOException {
@@ -698,6 +740,10 @@ public class TestJobResourceUploader {
       super(FileSystem.getLocal(conf), useWildcard);
     }
 
+    StubedUploader(FileSystem fs, boolean useWildcard) throws IOException {
+      super(fs, useWildcard);
+    }
+
     @Override
     FileStatus getFileStatus(Map<URI, FileStatus> statCache, Configuration job,
         Path p) throws IOException {




[23/50] hadoop git commit: HDFS-11799. Introduce a config to allow setting up write pipeline with fewer nodes than replication factor. Contributed by Brahma Reddy Battula

Posted by as...@apache.org.
HDFS-11799. Introduce a config to allow setting up write pipeline with fewer nodes than replication factor. Contributed by Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fda1221c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fda1221c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fda1221c

Branch: refs/heads/YARN-6592
Commit: fda1221c55101d97ac62e1ee4e3ddf9a915d5363
Parents: 31b5840
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Sep 19 11:25:45 2017 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Tue Sep 19 11:25:45 2017 +0530

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  13 +-
 .../org/apache/hadoop/hdfs/DataStreamer.java    |  31 +-
 .../hdfs/client/HdfsClientConfigKeys.java       |   2 +
 .../src/main/resources/hdfs-default.xml         |  17 ++
 .../TestReplaceDatanodeFailureReplication.java  | 291 +++++++++++++++++++
 .../hadoop/tools/TestHdfsConfigFields.java      |   4 +-
 6 files changed, 354 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 772049d..7e8e95b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -223,6 +223,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   final String clientName;
   final SocketFactory socketFactory;
   final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
+  final short dtpReplaceDatanodeOnFailureReplication;
   private final FileSystem.Statistics stats;
   private final URI namenodeUri;
   private final Random r = new Random();
@@ -305,7 +306,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
     this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
     this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
-
+    this.dtpReplaceDatanodeOnFailureReplication = (short) conf
+        .getInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+                MIN_REPLICATION,
+            HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+                MIN_REPLICATION_DEFAULT);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(
+          "Sets " + HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+              MIN_REPLICATION + " to "
+              + dtpReplaceDatanodeOnFailureReplication);
+    }
     this.ugi = UserGroupInformation.getCurrentUser();
 
     this.namenodeUri = nameNodeUri;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 4eafca1..99fa5f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -1384,7 +1384,36 @@ class DataStreamer extends Daemon {
       setPipeline(lb);
 
       //find the new datanode
-      final int d = findNewDatanode(original);
+      final int d;
+      try {
+        d = findNewDatanode(original);
+      } catch (IOException ioe) {
+        // Check the minimum number of nodes available to decide whether to
+        // continue the write.
+
+        // If the number of live block location datanodes is greater than or
+        // equal to the HdfsClientConfigKeys.BlockWrite.
+        // ReplaceDatanodeOnFailure.MIN_REPLICATION threshold, continue
+        // writing to the remaining nodes. Otherwise rethrow the exception.
+        //
+        // If HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+        // MIN_REPLICATION is set to 0 (the default), the exception is
+        // rethrown whenever a replacement datanode cannot be found.
+
+        if (dfsClient.dtpReplaceDatanodeOnFailureReplication > 0 && nodes.length
+            >= dfsClient.dtpReplaceDatanodeOnFailureReplication) {
+          DFSClient.LOG.warn(
+              "Failed to find a new datanode to add to the write pipeline, "
+                  + " continue to write to the pipeline with " + nodes.length
+                  + " nodes since it's no less than minimum replication: "
+                  + dfsClient.dtpReplaceDatanodeOnFailureReplication
+                  + " configured by "
+                  + BlockWrite.ReplaceDatanodeOnFailure.MIN_REPLICATION
+                  + ".", ioe);
+          return;
+        }
+        throw ioe;
+      }
       //transfer replica. pick a source from the original nodes
       final DatanodeInfo src = original[tried % original.length];
       final DatanodeInfo[] targets = {nodes[d]};
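
The decision the new catch block implements can be condensed into the following standalone helper, a sketch for illustration only (the method name and parameters are made up):

    // Returns true if the write should continue on the surviving pipeline
    // nodes instead of failing when no replacement datanode can be found.
    static boolean continueWithRemainingNodes(int minReplication,
        int liveNodesInPipeline) {
      // minReplication <= 0 keeps the old behavior: a replacement datanode is
      // always required, so the original IOException is rethrown.
      return minReplication > 0 && liveNodesInPipeline >= minReplication;
    }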

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index e99b099..97cb68b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -320,6 +320,8 @@ public interface HdfsClientConfigKeys {
       String  POLICY_DEFAULT = "DEFAULT";
       String  BEST_EFFORT_KEY = PREFIX + "best-effort";
       boolean BEST_EFFORT_DEFAULT = false;
+      String MIN_REPLICATION = PREFIX + "min-replication";
+      short MIN_REPLICATION_DEFAULT = 0;
     }
   }
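
A hedged sketch of how a client would raise this threshold, mirroring the test setup added later in this commit (the surrounding code is illustrative; only the key and HdfsConfiguration come from the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    // Tolerate pipeline shrinkage down to two replicas before failing writes.
    Configuration conf = new HdfsConfiguration();
    conf.setInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure
        .MIN_REPLICATION, 2);
    // A FileSystem/DFSClient created from this conf keeps writing as long as
    // at least two datanodes remain in the pipeline.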
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index af40a34..9327a2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -658,6 +658,23 @@
   </description>
 </property>
 
+  <property>
+    <name>dfs.client.block.write.replace-datanode-on-failure.min-replication</name>
+    <value>0</value>
+    <description>
+      The minimum number of replications needed in order not to fail the
+      write pipeline when new datanodes cannot be found to replace failed
+      datanodes in the pipeline (for example, due to a network failure).
+      If the number of remaining datanodes in the write pipeline is greater
+      than or equal to this value, writing continues to the remaining nodes.
+      Otherwise an exception is thrown.
+
+      If this is set to 0, an exception is thrown whenever a replacement
+      cannot be found.
+      See also dfs.client.block.write.replace-datanode-on-failure.policy.
+    </description>
+  </property>
+
 <property>
   <name>dfs.blockreport.intervalMsec</name>
   <value>21600000</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java
new file mode 100644
index 0000000..9591cb4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java
@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
+import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy;
+import org.apache.hadoop.io.IOUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Verify the behaviour of HdfsClientConfigKeys.BlockWrite.
+ * ReplaceDatanodeOnFailure.MIN_REPLICATION. If the number of live block
+ * location datanodes is greater than or equal to the
+ * 'dfs.client.block.write.replace-datanode-on-failure.min-replication'
+ * threshold, writing continues to the remaining nodes.
+ * Otherwise an exception is thrown.
+ * <p>
+ * If HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+ * MIN_REPLICATION is set to 0 (the default), an exception is thrown
+ * whenever a replacement datanode cannot be found.
+ */
+public class TestReplaceDatanodeFailureReplication {
+  static final Log LOG = LogFactory
+      .getLog(TestReplaceDatanodeFailureReplication.class);
+
+  static final String DIR =
+      "/" + TestReplaceDatanodeFailureReplication.class.getSimpleName() + "/";
+  static final short REPLICATION = 3;
+  final private static String RACK0 = "/rack0";
+
+  /**
+   * Test fail last datanode in the pipeline.
+   */
+  @Test
+  public void testLastDatanodeFailureInPipeline() throws Exception {
+    testWriteFileAndVerifyAfterDNStop(2, 1, 10, false);
+  }
+
+  /**
+   * Test fail first datanode in the pipeline.
+   */
+  @Test
+  public void testFirstDatanodeFailureInPipeline() throws Exception {
+    testWriteFileAndVerifyAfterDNStop(2, 0, 10, false);
+  }
+
+  /**
+   * Test fail all the datanodes except first in the pipeline.
+   */
+  @Test
+  public void testWithOnlyFirstDatanodeIsAlive() throws Exception {
+    testWriteFileAndVerifyAfterDNStop(1, 1, 1, true);
+  }
+
+  /**
+   * Test fail all the datanodes except lastnode in the pipeline.
+   */
+  @Test
+  public void testWithOnlyLastDatanodeIsAlive() throws Exception {
+    testWriteFileAndVerifyAfterDNStop(1, 0, 1, true);
+  }
+
+  /**
+   * Test when the number of live nodes is less than
+   * "dfs.client.block.write.replace-datanode-on-failure.min-replication".
+   */
+  @Test
+  public void testLessNumberOfLiveDatanodesThanWriteReplaceDatanodeOnFailureRF()
+      throws Exception {
+    final MiniDFSCluster cluster = setupCluster(2);
+
+    try {
+      final DistributedFileSystem fs = cluster.getFileSystem();
+      final Path dir = new Path(DIR);
+
+      final SlowWriter[] slowwriters = new SlowWriter[1];
+      for (int i = 1; i <= slowwriters.length; i++) {
+        // create slow writers at different speeds
+        slowwriters[i - 1] = new SlowWriter(fs, new Path(dir, "file" + i),
+            i * 200L);
+      }
+
+      for (SlowWriter s : slowwriters) {
+        s.start();
+      }
+
+      // Let slow writers write something.
+      // Some of them are too slow and may not have started yet.
+      sleepSeconds(1);
+
+      // stop an old datanode
+      cluster.stopDataNode(0);
+      cluster.stopDataNode(0);
+
+      // Let the slow writers write for a few more seconds.
+      // Everyone should have written something.
+      sleepSeconds(20);
+
+      // check replication and interrupt.
+      for (SlowWriter s : slowwriters) {
+        try {
+          s.out.getCurrentBlockReplication();
+          Assert.fail(
+              "Must throw exception as failed to add a new datanode for write "
+                  + "pipeline, minimum failure replication");
+        } catch (IOException e) {
+          // expected
+        }
+        s.interruptRunning();
+      }
+
+      // close files
+      for (SlowWriter s : slowwriters) {
+        s.joinAndClose();
+      }
+
+      // Verify the file
+      verifyFileContent(fs, slowwriters);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  private MiniDFSCluster setupCluster(int failRF) throws IOException {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+        MIN_REPLICATION, failRF);
+    // always replace a datanode
+    ReplaceDatanodeOnFailure.write(Policy.ALWAYS, false, conf);
+
+    final String[] racks = new String[REPLICATION];
+    Arrays.fill(racks, RACK0);
+    return new MiniDFSCluster.Builder(conf).racks(racks)
+        .numDataNodes(REPLICATION).build();
+  }
+
+  private void testWriteFileAndVerifyAfterDNStop(int failRF, int dnindex,
+      int slowWrites, boolean failPipeLine)
+      throws IOException, InterruptedException, TimeoutException {
+    final MiniDFSCluster cluster = setupCluster(failRF);
+    try {
+      final DistributedFileSystem fs = cluster.getFileSystem();
+      final Path dir = new Path(DIR);
+
+      final SlowWriter[] slowwriters = new SlowWriter[slowWrites];
+      for (int i = 1; i <= slowwriters.length; i++) {
+        // create slow writers at different speeds
+        slowwriters[i - 1] = new SlowWriter(fs, new Path(dir, "file" + i),
+            i * 200L);
+      }
+
+      for (SlowWriter s : slowwriters) {
+        s.start();
+      }
+
+      // Let slow writers write something.
+      // Some of them are too slow and may not have started yet.
+      sleepSeconds(3);
+
+      // stop a datanode
+      cluster.stopDataNode(dnindex);
+      if (failPipeLine) {
+        cluster.stopDataNode(dnindex);
+      }
+
+      // Let the slow writers write for a few more seconds.
+      // Everyone should have written something.
+      sleepSeconds(5);
+      cluster.waitFirstBRCompleted(0, 10000);
+      // check replication and interrupt.
+      for (SlowWriter s : slowwriters) {
+        Assert.assertEquals(failRF, s.out.getCurrentBlockReplication());
+        s.interruptRunning();
+      }
+
+      // close files
+      for (SlowWriter s : slowwriters) {
+        s.joinAndClose();
+      }
+
+      // Verify the file
+      verifyFileContent(fs, slowwriters);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  private void verifyFileContent(DistributedFileSystem fs,
+      SlowWriter[] slowwriters) throws IOException {
+    LOG.info("Verify the file");
+    for (int i = 0; i < slowwriters.length; i++) {
+      LOG.info(slowwriters[i].filepath + ": length=" + fs
+          .getFileStatus(slowwriters[i].filepath).getLen());
+      FSDataInputStream in = null;
+      try {
+        in = fs.open(slowwriters[i].filepath);
+        for (int j = 0, x;; j++) {
+          x = in.read();
+          if ((x) != -1) {
+            Assert.assertEquals(j, x);
+          } else {
+            return;
+          }
+        }
+      } finally {
+        IOUtils.closeStream(in);
+      }
+    }
+  }
+
+  static void sleepSeconds(final int waittime) throws InterruptedException {
+    LOG.info("Wait " + waittime + " seconds");
+    Thread.sleep(waittime * 1000L);
+  }
+
+  static class SlowWriter extends Thread {
+    private final Path filepath;
+    private final HdfsDataOutputStream out;
+    private final long sleepms;
+    private volatile boolean running = true;
+
+    SlowWriter(DistributedFileSystem fs, Path filepath, final long sleepms)
+        throws IOException {
+      super(SlowWriter.class.getSimpleName() + ":" + filepath);
+      this.filepath = filepath;
+      this.out = (HdfsDataOutputStream) fs.create(filepath, REPLICATION);
+      this.sleepms = sleepms;
+    }
+
+    @Override public void run() {
+      int i = 0;
+      try {
+        sleep(sleepms);
+        for (; running; i++) {
+          LOG.info(getName() + " writes " + i);
+          out.write(i);
+          out.hflush();
+          sleep(sleepms);
+        }
+      } catch (InterruptedException e) {
+        LOG.info(getName() + " interrupted:" + e);
+      } catch (IOException e) {
+        throw new RuntimeException(getName(), e);
+      } finally {
+        LOG.info(getName() + " terminated: i=" + i);
+      }
+    }
+
+    void interruptRunning() {
+      running = false;
+      interrupt();
+    }
+
+    void joinAndClose() throws InterruptedException {
+      LOG.info(getName() + " join and close");
+      join();
+      IOUtils.closeStream(out);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index 233dc5a..47db565 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -41,8 +41,8 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase {
   public void initializeMemberVariables() {
     xmlFilename = new String("hdfs-default.xml");
     configurationClasses = new Class[] { HdfsClientConfigKeys.class,
-        HdfsClientConfigKeys.StripedRead.class,
-        DFSConfigKeys.class};
+        HdfsClientConfigKeys.StripedRead.class, DFSConfigKeys.class,
+        HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.class };
 
     // Set error modes
     errorIfMissingConfigProps = true;




[16/50] hadoop git commit: YARN-6570. No logs were found for running application, running container. Contributed by Junping Du

Posted by as...@apache.org.
YARN-6570. No logs were found for running application, running
container. Contributed by Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c732924
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c732924
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c732924

Branch: refs/heads/YARN-6592
Commit: 7c732924a889cd280e972882619a1827877fbafa
Parents: 29dd551
Author: Xuan <xg...@apache.org>
Authored: Mon Sep 18 14:04:05 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Mon Sep 18 14:04:05 2017 -0700

----------------------------------------------------------------------
 .../nodemanager/containermanager/container/ContainerImpl.java     | 1 +
 .../org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java  | 3 ++-
 .../nodemanager/containermanager/container/TestContainer.java     | 3 +++
 3 files changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c732924/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index df107a7..836e70e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -616,6 +616,7 @@ public class ContainerImpl implements Container {
   public org.apache.hadoop.yarn.api.records.ContainerState getCurrentState() {
     switch (stateMachine.getCurrentState()) {
     case NEW:
+      return org.apache.hadoop.yarn.api.records.ContainerState.NEW;
     case LOCALIZING:
     case LOCALIZATION_FAILED:
     case SCHEDULED:
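
The single added return matters because of switch fall-through: without it, the NEW case dropped into the LOCALIZING/SCHEDULED branch, so a brand-new container was reported as SCHEDULED (see the test changes below). A self-contained toy example of the pattern, with made-up enums that only illustrate the Java semantics involved:

    enum Internal { NEW, LOCALIZING, SCHEDULED }
    enum Reported { NEW, SCHEDULED }

    static Reported report(Internal s) {
      switch (s) {
      case NEW:
        return Reported.NEW;        // without this return, NEW falls through
      case LOCALIZING:
      case SCHEDULED:
        return Reported.SCHEDULED;  // ...and is reported as SCHEDULED instead
      default:
        throw new IllegalStateException("unexpected state: " + s);
      }
    }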

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c732924/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
index 8e4522b..9e59449 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
@@ -159,7 +159,8 @@ public class TestEventFlow {
     containerManager.startContainers(allRequests);
 
     BaseContainerManagerTest.waitForContainerState(containerManager, cID,
-        Arrays.asList(ContainerState.RUNNING, ContainerState.SCHEDULED), 20);
+        Arrays.asList(ContainerState.RUNNING, ContainerState.SCHEDULED,
+            ContainerState.NEW), 20);
 
     List<ContainerId> containerIds = new ArrayList<ContainerId>();
     containerIds.add(cID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c732924/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
index 64e6cf0..b44b500 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
@@ -122,6 +122,8 @@ public class TestContainer {
     try {
       wc = new WrappedContainer(7, 314159265358979L, 4344, "yak");
       assertEquals(ContainerState.NEW, wc.c.getContainerState());
+      ContainerImpl container = (ContainerImpl) wc.c;
+      assertEquals(org.apache.hadoop.yarn.api.records.ContainerState.NEW,
+          container.getCurrentState());
       wc.initContainer();
 
       // Verify request for public/private resources to localizer
@@ -131,6 +133,7 @@ public class TestContainer {
               LocalResourceVisibility.APPLICATION));
       verify(wc.localizerBus).handle(argThat(matchesReq));
       assertEquals(ContainerState.LOCALIZING, wc.c.getContainerState());
+      assertEquals(
+          org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED,
+          container.getCurrentState());
     }
     finally {
       if (wc != null) {

